1 | /* | |
2 | * Socket and pipe I/O utilities used in rsync. | |
3 | * | |
4 | * Copyright (C) 1996-2001 Andrew Tridgell | |
5 | * Copyright (C) 1996 Paul Mackerras | |
6 | * Copyright (C) 2001, 2002 Martin Pool <mbp@samba.org> | |
7 | * Copyright (C) 2003-2022 Wayne Davison | |
8 | * | |
9 | * This program is free software; you can redistribute it and/or modify | |
10 | * it under the terms of the GNU General Public License as published by | |
11 | * the Free Software Foundation; either version 3 of the License, or | |
12 | * (at your option) any later version. | |
13 | * | |
14 | * This program is distributed in the hope that it will be useful, | |
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
17 | * GNU General Public License for more details. | |
18 | * | |
19 | * You should have received a copy of the GNU General Public License along | |
20 | * with this program; if not, visit the http://fsf.org website. | |
21 | */ | |
22 | ||
23 | /* Rsync provides its own multiplexing system, which is used to send | |
24 | * stderr and stdout over a single socket. | |
25 | * | |
26 | * For historical reasons this is off during the start of the | |
27 | * connection, but it's switched on quite early using | |
28 | * io_start_multiplex_out() and io_start_multiplex_in(). */ | |
29 | ||
30 | #include "rsync.h" | |
31 | #include "ifuncs.h" | |
32 | #include "inums.h" | |
33 | ||
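/* A minimal sketch of the multiplex framing described above (local names such
 * as "payload_len" are illustrative; the macros and message codes come from
 * rsync.h).  Every multiplexed packet starts with a 4-byte little-endian
 * header: the top byte holds MPLEX_BASE plus the message code and the low 24
 * bits hold the payload length, which is how send_msg(), perform_io() and
 * read_a_msg() below pack and unpack it:
 *
 *	uint32 hdr = ((MPLEX_BASE + (int)MSG_DATA) << 24) + payload_len;
 *	SIVAL(buf, 0, hdr);				// sender side
 *	...
 *	int tag = IVAL(buf, 0);				// receiver side
 *	size_t payload_len = tag & 0xFFFFFF;
 *	int code = (tag >> 24) - MPLEX_BASE;
 */
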
34 | /** If no timeout is specified then use a 60 second select timeout */ | |
35 | #define SELECT_TIMEOUT 60 | |
36 | ||
37 | extern int bwlimit; | |
38 | extern size_t bwlimit_writemax; | |
39 | extern int io_timeout; | |
40 | extern int am_server; | |
41 | extern int am_sender; | |
42 | extern int am_receiver; | |
43 | extern int am_generator; | |
44 | extern int local_server; | |
45 | extern int msgs2stderr; | |
46 | extern int inc_recurse; | |
47 | extern int io_error; | |
48 | extern int batch_fd; | |
49 | extern int eol_nulls; | |
50 | extern int flist_eof; | |
51 | extern int file_total; | |
52 | extern int file_old_total; | |
53 | extern int list_only; | |
54 | extern int read_batch; | |
55 | extern int compat_flags; | |
56 | extern int protect_args; | |
57 | extern int checksum_seed; | |
58 | extern int xfer_sum_len; | |
59 | extern int daemon_connection; | |
60 | extern int protocol_version; | |
61 | extern int remove_source_files; | |
62 | extern int preserve_hard_links; | |
63 | extern BOOL extra_flist_sending_enabled; | |
64 | extern BOOL flush_ok_after_signal; | |
65 | extern struct stats stats; | |
66 | extern time_t stop_at_utime; | |
67 | extern struct file_list *cur_flist; | |
68 | #ifdef ICONV_OPTION | |
69 | extern int filesfrom_convert; | |
70 | extern iconv_t ic_send, ic_recv; | |
71 | #endif | |
72 | ||
73 | int csum_length = SHORT_SUM_LENGTH; /* initial value */ | |
74 | int allowed_lull = 0; | |
75 | int msgdone_cnt = 0; | |
76 | int forward_flist_data = 0; | |
77 | BOOL flist_receiving_enabled = False; | |
78 | ||
79 | /* Ignore an EOF error if non-zero. See whine_about_eof(). */ | |
80 | int kluge_around_eof = 0; | |
81 | int got_kill_signal = -1; /* is set to 0 only after multiplexed I/O starts */ | |
82 | ||
83 | int sock_f_in = -1; | |
84 | int sock_f_out = -1; | |
85 | ||
86 | int64 total_data_read = 0; | |
87 | int64 total_data_written = 0; | |
88 | ||
89 | char num_dev_ino_buf[4 + 8 + 8]; | |
90 | ||
91 | static struct { | |
92 | xbuf in, out, msg; | |
93 | int in_fd; | |
94 | int out_fd; /* Both "out" and "msg" go to this fd. */ | |
95 | int in_multiplexed; | |
96 | unsigned out_empty_len; | |
97 | size_t raw_data_header_pos; /* in the out xbuf */ | |
98 | size_t raw_flushing_ends_before; /* in the out xbuf */ | |
99 | size_t raw_input_ends_before; /* in the in xbuf */ | |
100 | } iobuf = { .in_fd = -1, .out_fd = -1 }; | |
101 | ||
102 | static time_t last_io_in; | |
103 | static time_t last_io_out; | |
104 | ||
105 | static int write_batch_monitor_in = -1; | |
106 | static int write_batch_monitor_out = -1; | |
107 | ||
108 | static int ff_forward_fd = -1; | |
109 | static int ff_reenable_multiplex = -1; | |
110 | static char ff_lastchar = '\0'; | |
111 | static xbuf ff_xb = EMPTY_XBUF; | |
112 | #ifdef ICONV_OPTION | |
113 | static xbuf iconv_buf = EMPTY_XBUF; | |
114 | #endif | |
115 | static int select_timeout = SELECT_TIMEOUT; | |
116 | static int active_filecnt = 0; | |
117 | static OFF_T active_bytecnt = 0; | |
118 | static int first_message = 1; | |
119 | ||
120 | static char int_byte_extra[64] = { | |
121 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* (00 - 3F)/4 */ | |
122 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* (40 - 7F)/4 */ | |
123 | 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* (80 - BF)/4 */ | |
124 | 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 5, 6, /* (C0 - FF)/4 */ | |
125 | }; | |
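
/* Note: this table is indexed by the leading byte of one of rsync's
 * variable-length integers divided by 4 (hence the "/4" in the row comments)
 * and gives the number of extra bytes that follow that leading byte.  Read
 * back out of the table: 0x00-0x7F need 0 extra bytes, 0x80-0xBF need 1,
 * 0xC0-0xDF need 2, 0xE0-0xEF need 3, 0xF0-0xF7 need 4, 0xF8-0xFB need 5,
 * and 0xFC-0xFF need 6.  In sketch form ("lead" is an illustrative name):
 *
 *	int extra = int_byte_extra[lead / 4];
 *	// then read "extra" more bytes to complete the value
 *
 * The variable-length integer readers elsewhere in io.c use this lookup. */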
126 | ||
127 | /* Our I/O buffers are sized with no bits on in the lowest byte of the "size" | |
128 | * (indeed, our rounding of sizes in 1024-byte units assures more than this). | |
129 | * This allows the code that is storing bytes near the physical end of a | |
130 | * circular buffer to temporarily reduce the buffer's size (in order to make | |
131 | * some storing idioms easier), while also making it simple to restore the | |
132 | * buffer's actual size when the buffer's "pos" wraps around to the start (we | |
133 | * just round the buffer's size up again). */ | |
134 | ||
135 | #define IOBUF_WAS_REDUCED(siz) ((siz) & 0xFF) | |
136 | #define IOBUF_RESTORE_SIZE(siz) (((siz) | 0xFF) + 1) | |
137 | ||
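/* Worked example (hypothetical sizes): with a 4096-byte output buffer, a
 * 4-byte MSG_DATA header that would otherwise have to start at offset 4094
 * makes perform_io() shrink the buffer to 4094 bytes and park the header at
 * offset 0 instead.  IOBUF_WAS_REDUCED(4094) is then true (4094 & 0xFF is
 * 0xFE) and IOBUF_RESTORE_SIZE(4094) is ((4094 | 0xFF) + 1) == 4096, which
 * recovers the real size because the allocations are rounded to 1024-byte
 * multiples and so always have a zero low byte. */
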
138 | #define IN_MULTIPLEXED (iobuf.in_multiplexed != 0) | |
139 | #define IN_MULTIPLEXED_AND_READY (iobuf.in_multiplexed > 0) | |
140 | #define OUT_MULTIPLEXED (iobuf.out_empty_len != 0) | |
141 | ||
142 | #define PIO_NEED_INPUT (1<<0) /* The *_NEED_* flags are mutually exclusive. */ | |
143 | #define PIO_NEED_OUTROOM (1<<1) | |
144 | #define PIO_NEED_MSGROOM (1<<2) | |
145 | ||
146 | #define PIO_CONSUME_INPUT (1<<4) /* Must be combined with PIO_NEED_INPUT. */ | |
147 | ||
148 | #define PIO_INPUT_AND_CONSUME (PIO_NEED_INPUT | PIO_CONSUME_INPUT) | |
149 | #define PIO_NEED_FLAGS (PIO_NEED_INPUT | PIO_NEED_OUTROOM | PIO_NEED_MSGROOM) | |
150 | ||
151 | #define REMOTE_OPTION_ERROR "rsync: on remote machine: -" | |
152 | #define REMOTE_OPTION_ERROR2 ": unknown option" | |
153 | ||
154 | #define FILESFROM_BUFLEN 2048 | |
155 | ||
156 | enum festatus { FES_SUCCESS, FES_REDO, FES_NO_SEND }; | |
157 | ||
158 | static flist_ndx_list redo_list, hlink_list; | |
159 | ||
160 | static void read_a_msg(void); | |
161 | static void drain_multiplex_messages(void); | |
162 | static void sleep_for_bwlimit(int bytes_written); | |
163 | ||
164 | static void check_timeout(BOOL allow_keepalive, int keepalive_flags) | |
165 | { | |
166 | time_t t, chk; | |
167 | ||
168 | /* On the receiving side, the generator is now the one that decides | |
169 | * when a timeout has occurred. When it is sifting through a lot of | |
170 | * files looking for work, it will be sending keep-alive messages to | |
171 | * the sender, and even though the receiver won't be sending/receiving | |
172 | * anything (not even keep-alive messages), the successful writes to | |
173 | * the sender will keep things going. If the receiver is actively | |
174 | * receiving data, it will ensure that the generator knows that it is | |
175 | * not idle by sending the generator keep-alive messages (since the | |
176 | * generator might be blocked trying to send checksums, it needs to | |
177 | * know that the receiver is active). Thus, as long as one or the | |
178 | * other is successfully doing work, the generator will not timeout. */ | |
179 | if (!io_timeout) | |
180 | return; | |
181 | ||
182 | t = time(NULL); | |
183 | ||
184 | if (allow_keepalive) { | |
185 | /* This may put data into iobuf.msg w/o flushing. */ | |
186 | maybe_send_keepalive(t, keepalive_flags); | |
187 | } | |
188 | ||
189 | if (!last_io_in) | |
190 | last_io_in = t; | |
191 | ||
192 | if (am_receiver) | |
193 | return; | |
194 | ||
195 | chk = MAX(last_io_out, last_io_in); | |
196 | if (t - chk >= io_timeout) { | |
197 | if (am_server) | |
198 | msgs2stderr = 1; | |
199 | rprintf(FERROR, "[%s] io timeout after %d seconds -- exiting\n", | |
200 | who_am_i(), (int)(t-chk)); | |
201 | exit_cleanup(RERR_TIMEOUT); | |
202 | } | |
203 | } | |
204 | ||
205 | /* It's almost always an error to get an EOF when we're trying to read from the | |
206 | * network, because the protocol is (for the most part) self-terminating. | |
207 | * | |
208 | * There is one case for the receiver when it is at the end of the transfer | |
209 | * (hanging around reading any keep-alive packets that might come its way): if | |
210 | * the sender dies before the generator's kill-signal comes through, we can end | |
211 | * up here needing to loop until the kill-signal arrives. In this situation, | |
212 | * kluge_around_eof will be < 0. | |
213 | * | |
214 | * There is another case for older protocol versions (< 24) where the module | |
215 | * listing was not terminated, so we must ignore an EOF error in that case and | |
216 | * exit. In this situation, kluge_around_eof will be > 0. */ | |
217 | static NORETURN void whine_about_eof(BOOL allow_kluge) | |
218 | { | |
219 | if (kluge_around_eof && allow_kluge) { | |
220 | int i; | |
221 | if (kluge_around_eof > 0) | |
222 | exit_cleanup(0); | |
223 | /* If we're still here after 10 seconds, exit with an error. */ | |
224 | for (i = 10*1000/20; i--; ) | |
225 | msleep(20); | |
226 | } | |
227 | ||
228 | rprintf(FERROR, RSYNC_NAME ": connection unexpectedly closed " | |
229 | "(%s bytes received so far) [%s]\n", | |
230 | big_num(stats.total_read), who_am_i()); | |
231 | ||
232 | exit_cleanup(RERR_STREAMIO); | |
233 | } | |
234 | ||
235 | /* Do a safe read, handling any needed looping and error handling. | |
236 | * Returns the count of the bytes read, which will only be different | |
237 | * from "len" if we encountered an EOF. This routine is not used on | |
238 | * the socket except very early in the transfer. */ | |
239 | static size_t safe_read(int fd, char *buf, size_t len) | |
240 | { | |
241 | size_t got = 0; | |
242 | ||
243 | assert(fd != iobuf.in_fd); | |
244 | ||
245 | while (1) { | |
246 | struct timeval tv; | |
247 | fd_set r_fds, e_fds; | |
248 | int cnt; | |
249 | ||
250 | FD_ZERO(&r_fds); | |
251 | FD_SET(fd, &r_fds); | |
252 | FD_ZERO(&e_fds); | |
253 | FD_SET(fd, &e_fds); | |
254 | tv.tv_sec = select_timeout; | |
255 | tv.tv_usec = 0; | |
256 | ||
257 | cnt = select(fd+1, &r_fds, NULL, &e_fds, &tv); | |
258 | if (cnt <= 0) { | |
259 | if (cnt < 0 && errno == EBADF) { | |
260 | rsyserr(FERROR, errno, "safe_read select failed"); | |
261 | exit_cleanup(RERR_FILEIO); | |
262 | } | |
263 | check_timeout(1, MSK_ALLOW_FLUSH); | |
264 | continue; | |
265 | } | |
266 | ||
267 | /*if (FD_ISSET(fd, &e_fds)) | |
268 | rprintf(FINFO, "select exception on fd %d\n", fd); */ | |
269 | ||
270 | if (FD_ISSET(fd, &r_fds)) { | |
271 | ssize_t n = read(fd, buf + got, len - got); | |
272 | if (DEBUG_GTE(IO, 2)) { | |
273 | rprintf(FINFO, "[%s] safe_read(%d)=%" SIZE_T_FMT_MOD "d\n", | |
274 | who_am_i(), fd, (SIZE_T_FMT_CAST)n); | |
275 | } | |
276 | if (n == 0) | |
277 | break; | |
278 | if (n < 0) { | |
279 | if (errno == EINTR) | |
280 | continue; | |
281 | rsyserr(FERROR, errno, "safe_read failed to read %" SIZE_T_FMT_MOD "d bytes", | |
282 | (SIZE_T_FMT_CAST)len); | |
283 | exit_cleanup(RERR_STREAMIO); | |
284 | } | |
285 | if ((got += (size_t)n) == len) | |
286 | break; | |
287 | } | |
288 | } | |
289 | ||
290 | return got; | |
291 | } | |
292 | ||
293 | static const char *what_fd_is(int fd) | |
294 | { | |
295 | static char buf[20]; | |
296 | ||
297 | if (fd == sock_f_out) | |
298 | return "socket"; | |
299 | else if (fd == iobuf.out_fd) | |
300 | return "message fd"; | |
301 | else if (fd == batch_fd) | |
302 | return "batch file"; | |
303 | else { | |
304 | snprintf(buf, sizeof buf, "fd %d", fd); | |
305 | return buf; | |
306 | } | |
307 | } | |
308 | ||
309 | /* Do a safe write, handling any needed looping and error handling. | |
310 | * Returns only if everything was successfully written. This routine | |
311 | * is not used on the socket except very early in the transfer. */ | |
312 | static void safe_write(int fd, const char *buf, size_t len) | |
313 | { | |
314 | ssize_t n; | |
315 | ||
316 | assert(fd != iobuf.out_fd); | |
317 | ||
318 | n = write(fd, buf, len); | |
319 | if ((size_t)n == len) | |
320 | return; | |
321 | if (n < 0) { | |
322 | if (errno != EINTR && errno != EWOULDBLOCK && errno != EAGAIN) { | |
323 | write_failed: | |
324 | rsyserr(FERROR, errno, | |
325 | "safe_write failed to write %" SIZE_T_FMT_MOD "d bytes to %s", | |
326 | (SIZE_T_FMT_CAST)len, what_fd_is(fd)); | |
327 | exit_cleanup(RERR_STREAMIO); | |
328 | } | |
329 | } else { | |
330 | buf += n; | |
331 | len -= n; | |
332 | } | |
333 | ||
334 | while (len) { | |
335 | struct timeval tv; | |
336 | fd_set w_fds; | |
337 | int cnt; | |
338 | ||
339 | FD_ZERO(&w_fds); | |
340 | FD_SET(fd, &w_fds); | |
341 | tv.tv_sec = select_timeout; | |
342 | tv.tv_usec = 0; | |
343 | ||
344 | cnt = select(fd + 1, NULL, &w_fds, NULL, &tv); | |
345 | if (cnt <= 0) { | |
346 | if (cnt < 0 && errno == EBADF) { | |
347 | rsyserr(FERROR, errno, "safe_write select failed on %s", what_fd_is(fd)); | |
348 | exit_cleanup(RERR_FILEIO); | |
349 | } | |
350 | if (io_timeout) | |
351 | maybe_send_keepalive(time(NULL), MSK_ALLOW_FLUSH); | |
352 | continue; | |
353 | } | |
354 | ||
355 | if (FD_ISSET(fd, &w_fds)) { | |
356 | n = write(fd, buf, len); | |
357 | if (n < 0) { | |
358 | if (errno == EINTR) | |
359 | continue; | |
360 | goto write_failed; | |
361 | } | |
362 | buf += n; | |
363 | len -= n; | |
364 | } | |
365 | } | |
366 | } | |
367 | ||
368 | /* This is only called when files-from data is known to be available. We read | |
369 | * a chunk of data and put it into the output buffer. */ | |
370 | static void forward_filesfrom_data(void) | |
371 | { | |
372 | ssize_t len; | |
373 | ||
374 | len = read(ff_forward_fd, ff_xb.buf + ff_xb.len, ff_xb.size - ff_xb.len); | |
375 | if (len <= 0) { | |
376 | if (len == 0 || errno != EINTR) { | |
377 | /* Send end-of-file marker */ | |
378 | ff_forward_fd = -1; | |
379 | write_buf(iobuf.out_fd, "\0\0", ff_lastchar ? 2 : 1); | |
380 | free_xbuf(&ff_xb); | |
381 | if (ff_reenable_multiplex >= 0) | |
382 | io_start_multiplex_out(ff_reenable_multiplex); | |
383 | free_implied_include_partial_string(); | |
384 | } | |
385 | return; | |
386 | } | |
387 | ||
388 | if (DEBUG_GTE(IO, 2)) { | |
389 | rprintf(FINFO, "[%s] files-from read=%" SIZE_T_FMT_MOD "d\n", | |
390 | who_am_i(), (SIZE_T_FMT_CAST)len); | |
391 | } | |
392 | ||
393 | #ifdef ICONV_OPTION | |
394 | len += ff_xb.len; | |
395 | #endif | |
396 | ||
397 | if (!eol_nulls) { | |
398 | char *s = ff_xb.buf + len; | |
399 | /* Transform CR and/or LF into '\0' */ | |
400 | while (s-- > ff_xb.buf) { | |
401 | if (*s == '\n' || *s == '\r') | |
402 | *s = '\0'; | |
403 | } | |
404 | } | |
405 | ||
406 | if (ff_lastchar) | |
407 | ff_xb.pos = 0; | |
408 | else { | |
409 | char *s = ff_xb.buf; | |
410 | /* Last buf ended with a '\0', so don't let this buf start with one. */ | |
411 | while (len && *s == '\0') | |
412 | s++, len--; | |
413 | ff_xb.pos = s - ff_xb.buf; | |
414 | } | |
415 | ||
416 | #ifdef ICONV_OPTION | |
417 | if (filesfrom_convert && len) { | |
418 | char *sob = ff_xb.buf + ff_xb.pos, *s = sob; | |
419 | char *eob = sob + len; | |
420 | int flags = ICB_INCLUDE_BAD | ICB_INCLUDE_INCOMPLETE | ICB_CIRCULAR_OUT; | |
421 | if (ff_lastchar == '\0') | |
422 | flags |= ICB_INIT; | |
423 | /* Convert/send each null-terminated string separately, skipping empties. */ | |
424 | while (s != eob) { | |
425 | if (*s++ == '\0') { | |
426 | ff_xb.len = s - sob - 1; | |
427 | add_implied_include(sob, 0); | |
428 | if (iconvbufs(ic_send, &ff_xb, &iobuf.out, flags) < 0) | |
429 | exit_cleanup(RERR_PROTOCOL); /* impossible? */ | |
430 | write_buf(iobuf.out_fd, s-1, 1); /* Send the '\0'. */ | |
431 | while (s != eob && *s == '\0') | |
432 | s++; | |
433 | sob = s; | |
434 | ff_xb.pos = sob - ff_xb.buf; | |
435 | flags |= ICB_INIT; | |
436 | } | |
437 | } | |
438 | ||
439 | if ((ff_xb.len = s - sob) == 0) | |
440 | ff_lastchar = '\0'; | |
441 | else { | |
442 | /* Handle a partial string specially, saving any incomplete chars. */ | |
443 | implied_include_partial_string(sob, s); | |
444 | flags &= ~ICB_INCLUDE_INCOMPLETE; | |
445 | if (iconvbufs(ic_send, &ff_xb, &iobuf.out, flags) < 0) { | |
446 | if (errno == E2BIG) | |
447 | exit_cleanup(RERR_PROTOCOL); /* impossible? */ | |
448 | if (ff_xb.pos) | |
449 | memmove(ff_xb.buf, ff_xb.buf + ff_xb.pos, ff_xb.len); | |
450 | } | |
451 | ff_lastchar = 'x'; /* Anything non-zero. */ | |
452 | } | |
453 | } else | |
454 | #endif | |
455 | ||
456 | if (len) { | |
457 | char *f = ff_xb.buf + ff_xb.pos; | |
458 | char *t = ff_xb.buf; | |
459 | char *eob = f + len; | |
460 | char *cur = t; | |
461 | /* Eliminate any multi-'\0' runs. */ | |
462 | while (f != eob) { | |
463 | if (!(*t++ = *f++)) { | |
464 | add_implied_include(cur, 0); | |
465 | cur = t; | |
466 | while (f != eob && *f == '\0') | |
467 | f++; | |
468 | } | |
469 | } | |
470 | implied_include_partial_string(cur, t); | |
471 | ff_lastchar = f[-1]; | |
472 | if ((len = t - ff_xb.buf) != 0) { | |
473 | /* This will not circle back to perform_io() because we only get | |
474 | * called when there is plenty of room in the output buffer. */ | |
475 | write_buf(iobuf.out_fd, ff_xb.buf, len); | |
476 | } | |
477 | } | |
478 | } | |
479 | ||
480 | void reduce_iobuf_size(xbuf *out, size_t new_size) | |
481 | { | |
482 | if (new_size < out->size) { | |
483 | /* Avoid weird buffer interactions by only outputting this to stderr. */ | |
484 | if (msgs2stderr == 1 && DEBUG_GTE(IO, 4)) { | |
485 | const char *name = out == &iobuf.out ? "iobuf.out" | |
486 | : out == &iobuf.msg ? "iobuf.msg" | |
487 | : NULL; | |
488 | if (name) { | |
489 | rprintf(FINFO, "[%s] reduced size of %s (-%d)\n", | |
490 | who_am_i(), name, (int)(out->size - new_size)); | |
491 | } | |
492 | } | |
493 | out->size = new_size; | |
494 | } | |
495 | } | |
496 | ||
497 | void restore_iobuf_size(xbuf *out) | |
498 | { | |
499 | if (IOBUF_WAS_REDUCED(out->size)) { | |
500 | size_t new_size = IOBUF_RESTORE_SIZE(out->size); | |
501 | /* Avoid weird buffer interactions by only outputting this to stderr. */ | |
502 | if (msgs2stderr == 1 && DEBUG_GTE(IO, 4)) { | |
503 | const char *name = out == &iobuf.out ? "iobuf.out" | |
504 | : out == &iobuf.msg ? "iobuf.msg" | |
505 | : NULL; | |
506 | if (name) { | |
507 | rprintf(FINFO, "[%s] restored size of %s (+%d)\n", | |
508 | who_am_i(), name, (int)(new_size - out->size)); | |
509 | } | |
510 | } | |
511 | out->size = new_size; | |
512 | } | |
513 | } | |
514 | ||
515 | static void handle_kill_signal(BOOL flush_ok) | |
516 | { | |
517 | got_kill_signal = -1; | |
518 | flush_ok_after_signal = flush_ok; | |
519 | exit_cleanup(RERR_SIGNAL); | |
520 | } | |
521 | ||
522 | /* Perform buffered input and/or output until specified conditions are met. | |
523 | * When given a "needed" read or write request, this returns without doing any | |
524 | * I/O if the needed input bytes or write space is already available. Once I/O | |
525 | * is needed, this will try to do whatever reading and/or writing is currently | |
526 | * possible, up to the maximum buffer allowances, no matter if this is a read | |
527 | * or write request. However, the I/O stops as soon as the required input | |
528 | * bytes or output space is available. If this is not a read request, the | |
529 | * routine may also do some advantageous reading of messages from a multiplexed | |
530 | * input source (which ensures that we don't jam up with everyone in their | |
531 | * "need to write" code and nobody reading the accumulated data that would make | |
532 | * writing possible). | |
533 | * | |
534 | * The iobuf.in, .out and .msg buffers are all circular. Callers need to be | |
535 | * aware that some data copies will need to be split when the bytes wrap around | |
536 | * from the end to the start. In order to help make writing into the output | |
537 | * buffers easier for some operations (such as the use of SIVAL() into the | |
538 | * buffer) a buffer may be temporarily shortened by a small amount, but the | |
539 | * original size will be automatically restored when the .pos wraps to the | |
540 | * start. See also the 3 raw_* iobuf vars that are used in the handling of | |
541 | * MSG_DATA bytes as they are read-from/written-into the buffers. | |
542 | * | |
543 | * When writing, we flush data in the following priority order: | |
544 | * | |
545 | * 1. Finish writing any in-progress MSG_DATA sequence from iobuf.out. | |
546 | * | |
547 | * 2. Write out all the messages from the message buf (if iobuf.msg is active). | |
548 | * Yes, this means that a PIO_NEED_OUTROOM call will completely flush any | |
549 | * messages before getting to the iobuf.out flushing (except for rule 1). | |
550 | * | |
551 | * 3. Write out the raw data from iobuf.out, possibly filling in the multiplexed | |
552 | * MSG_DATA header that was pre-allocated (when output is multiplexed). | |
553 | * | |
554 | * TODO: items for possible future work: | |
555 | * | |
556 | * - Make this routine able to read the generator-to-receiver batch flow? | |
557 | * | |
558 | * Unlike the old routines that this replaces, it is OK to read ahead as far as | |
559 | * we can because the read_a_msg() routine now reads its bytes out of the input | |
560 | * buffer. In the old days, only raw data was in the input buffer, and any | |
561 | * unused raw data in the buf would prevent the reading of socket data. */ | |
562 | static char *perform_io(size_t needed, int flags) | |
563 | { | |
564 | fd_set r_fds, e_fds, w_fds; | |
565 | struct timeval tv; | |
566 | int cnt, max_fd; | |
567 | size_t empty_buf_len = 0; | |
568 | xbuf *out; | |
569 | char *data; | |
570 | ||
571 | if (iobuf.in.len == 0 && iobuf.in.pos != 0) { | |
572 | if (iobuf.raw_input_ends_before) | |
573 | iobuf.raw_input_ends_before -= iobuf.in.pos; | |
574 | iobuf.in.pos = 0; | |
575 | } | |
576 | ||
577 | switch (flags & PIO_NEED_FLAGS) { | |
578 | case PIO_NEED_INPUT: | |
579 | /* We never resize the circular input buffer. */ | |
580 | if (iobuf.in.size < needed) { | |
581 | rprintf(FERROR, "need to read %" SIZE_T_FMT_MOD "d bytes," | |
582 | " iobuf.in.buf is only %" SIZE_T_FMT_MOD "d bytes.\n", | |
583 | (SIZE_T_FMT_CAST)needed, (SIZE_T_FMT_CAST)iobuf.in.size); | |
584 | exit_cleanup(RERR_PROTOCOL); | |
585 | } | |
586 | ||
587 | if (msgs2stderr == 1 && DEBUG_GTE(IO, 3)) { | |
588 | rprintf(FINFO, "[%s] perform_io(%" SIZE_T_FMT_MOD "d, %sinput)\n", | |
589 | who_am_i(), (SIZE_T_FMT_CAST)needed, flags & PIO_CONSUME_INPUT ? "consume&" : ""); | |
590 | } | |
591 | break; | |
592 | ||
593 | case PIO_NEED_OUTROOM: | |
594 | /* We never resize the circular output buffer. */ | |
595 | if (iobuf.out.size - iobuf.out_empty_len < needed) { | |
596 | fprintf(stderr, "need to write %" SIZE_T_FMT_MOD "d bytes," | |
597 | " iobuf.out.buf is only %" SIZE_T_FMT_MOD "d bytes.\n", | |
598 | (SIZE_T_FMT_CAST)needed, (SIZE_T_FMT_CAST)(iobuf.out.size - iobuf.out_empty_len)); | |
599 | exit_cleanup(RERR_PROTOCOL); | |
600 | } | |
601 | ||
602 | if (msgs2stderr == 1 && DEBUG_GTE(IO, 3)) { | |
603 | rprintf(FINFO, "[%s] perform_io(%" SIZE_T_FMT_MOD "d," | |
604 | " outroom) needs to flush %" SIZE_T_FMT_MOD "d\n", | |
605 | who_am_i(), (SIZE_T_FMT_CAST)needed, | |
606 | iobuf.out.len + needed > iobuf.out.size | |
607 | ? (SIZE_T_FMT_CAST)(iobuf.out.len + needed - iobuf.out.size) : (SIZE_T_FMT_CAST)0); | |
608 | } | |
609 | break; | |
610 | ||
611 | case PIO_NEED_MSGROOM: | |
612 | /* We never resize the circular message buffer. */ | |
613 | if (iobuf.msg.size < needed) { | |
614 | fprintf(stderr, "need to write %" SIZE_T_FMT_MOD "d bytes," | |
615 | " iobuf.msg.buf is only %" SIZE_T_FMT_MOD "d bytes.\n", | |
616 | (SIZE_T_FMT_CAST)needed, (SIZE_T_FMT_CAST)iobuf.msg.size); | |
617 | exit_cleanup(RERR_PROTOCOL); | |
618 | } | |
619 | ||
620 | if (msgs2stderr == 1 && DEBUG_GTE(IO, 3)) { | |
621 | rprintf(FINFO, "[%s] perform_io(%" SIZE_T_FMT_MOD "d," | |
622 | " msgroom) needs to flush %" SIZE_T_FMT_MOD "d\n", | |
623 | who_am_i(), (SIZE_T_FMT_CAST)needed, | |
624 | iobuf.msg.len + needed > iobuf.msg.size | |
625 | ? (SIZE_T_FMT_CAST)(iobuf.msg.len + needed - iobuf.msg.size) : (SIZE_T_FMT_CAST)0); | |
626 | } | |
627 | break; | |
628 | ||
629 | case 0: | |
630 | if (msgs2stderr == 1 && DEBUG_GTE(IO, 3)) { | |
631 | rprintf(FINFO, "[%s] perform_io(%" SIZE_T_FMT_MOD "d, %d)\n", | |
632 | who_am_i(), (SIZE_T_FMT_CAST)needed, flags); | |
633 | } | |
634 | break; | |
635 | ||
636 | default: | |
637 | exit_cleanup(RERR_UNSUPPORTED); | |
638 | } | |
639 | ||
640 | while (1) { | |
641 | switch (flags & PIO_NEED_FLAGS) { | |
642 | case PIO_NEED_INPUT: | |
643 | if (iobuf.in.len >= needed) | |
644 | goto double_break; | |
645 | break; | |
646 | case PIO_NEED_OUTROOM: | |
647 | /* Note that iobuf.out_empty_len doesn't factor into this check | |
648 | * because iobuf.out.len already holds any needed header len. */ | |
649 | if (iobuf.out.len + needed <= iobuf.out.size) | |
650 | goto double_break; | |
651 | break; | |
652 | case PIO_NEED_MSGROOM: | |
653 | if (iobuf.msg.len + needed <= iobuf.msg.size) | |
654 | goto double_break; | |
655 | break; | |
656 | } | |
657 | ||
658 | max_fd = -1; | |
659 | ||
660 | FD_ZERO(&r_fds); | |
661 | FD_ZERO(&e_fds); | |
662 | if (iobuf.in_fd >= 0 && iobuf.in.size - iobuf.in.len) { | |
663 | if (!read_batch || batch_fd >= 0) { | |
664 | FD_SET(iobuf.in_fd, &r_fds); | |
665 | FD_SET(iobuf.in_fd, &e_fds); | |
666 | } | |
667 | if (iobuf.in_fd > max_fd) | |
668 | max_fd = iobuf.in_fd; | |
669 | } | |
670 | ||
671 | /* Only do more filesfrom processing if there is enough room in the out buffer. */ | |
672 | if (ff_forward_fd >= 0 && iobuf.out.size - iobuf.out.len > FILESFROM_BUFLEN*2) { | |
673 | FD_SET(ff_forward_fd, &r_fds); | |
674 | if (ff_forward_fd > max_fd) | |
675 | max_fd = ff_forward_fd; | |
676 | } | |
677 | ||
678 | FD_ZERO(&w_fds); | |
679 | if (iobuf.out_fd >= 0) { | |
680 | if (iobuf.raw_flushing_ends_before | |
681 | || (!iobuf.msg.len && iobuf.out.len > iobuf.out_empty_len && !(flags & PIO_NEED_MSGROOM))) { | |
682 | if (OUT_MULTIPLEXED && !iobuf.raw_flushing_ends_before) { | |
683 | /* The iobuf.raw_flushing_ends_before value can point off the end | |
684 | * of the iobuf.out buffer for a while, for easier subtracting. */ | |
685 | iobuf.raw_flushing_ends_before = iobuf.out.pos + iobuf.out.len; | |
686 | ||
687 | SIVAL(iobuf.out.buf + iobuf.raw_data_header_pos, 0, | |
688 | ((MPLEX_BASE + (int)MSG_DATA)<<24) + iobuf.out.len - 4); | |
689 | ||
690 | if (msgs2stderr == 1 && DEBUG_GTE(IO, 1)) { | |
691 | rprintf(FINFO, "[%s] send_msg(%d, %" SIZE_T_FMT_MOD "d)\n", | |
692 | who_am_i(), (int)MSG_DATA, (SIZE_T_FMT_CAST)iobuf.out.len - 4); | |
693 | } | |
694 | ||
695 | /* reserve room for the next MSG_DATA header */ | |
696 | iobuf.raw_data_header_pos = iobuf.raw_flushing_ends_before; | |
697 | if (iobuf.raw_data_header_pos >= iobuf.out.size) | |
698 | iobuf.raw_data_header_pos -= iobuf.out.size; | |
699 | else if (iobuf.raw_data_header_pos + 4 > iobuf.out.size) { | |
700 | /* The 4-byte header won't fit at the end of the buffer, | |
701 | * so we'll temporarily reduce the output buffer's size | |
702 | * and put the header at the start of the buffer. */ | |
703 | reduce_iobuf_size(&iobuf.out, iobuf.raw_data_header_pos); | |
704 | iobuf.raw_data_header_pos = 0; | |
705 | } | |
706 | /* Yes, it is possible for this to make len > size for a while. */ | |
707 | iobuf.out.len += 4; | |
708 | } | |
709 | ||
710 | empty_buf_len = iobuf.out_empty_len; | |
711 | out = &iobuf.out; | |
712 | } else if (iobuf.msg.len) { | |
713 | empty_buf_len = 0; | |
714 | out = &iobuf.msg; | |
715 | } else | |
716 | out = NULL; | |
717 | if (out) { | |
718 | FD_SET(iobuf.out_fd, &w_fds); | |
719 | if (iobuf.out_fd > max_fd) | |
720 | max_fd = iobuf.out_fd; | |
721 | } | |
722 | } else | |
723 | out = NULL; | |
724 | ||
725 | if (max_fd < 0) { | |
726 | switch (flags & PIO_NEED_FLAGS) { | |
727 | case PIO_NEED_INPUT: | |
728 | iobuf.in.len = 0; | |
729 | if (kluge_around_eof == 2) | |
730 | exit_cleanup(0); | |
731 | if (iobuf.in_fd == -2) | |
732 | whine_about_eof(True); | |
733 | rprintf(FERROR, "error in perform_io: no fd for input.\n"); | |
734 | exit_cleanup(RERR_PROTOCOL); | |
735 | case PIO_NEED_OUTROOM: | |
736 | case PIO_NEED_MSGROOM: | |
737 | msgs2stderr = 1; | |
738 | drain_multiplex_messages(); | |
739 | if (iobuf.out_fd == -2) | |
740 | whine_about_eof(True); | |
741 | rprintf(FERROR, "error in perform_io: no fd for output.\n"); | |
742 | exit_cleanup(RERR_PROTOCOL); | |
743 | default: | |
744 | /* No stated needs, so I guess this is OK. */ | |
745 | break; | |
746 | } | |
747 | break; | |
748 | } | |
749 | ||
750 | if (got_kill_signal > 0) | |
751 | handle_kill_signal(True); | |
752 | ||
753 | if (extra_flist_sending_enabled) { | |
754 | if (file_total - file_old_total < MAX_FILECNT_LOOKAHEAD && IN_MULTIPLEXED_AND_READY) | |
755 | tv.tv_sec = 0; | |
756 | else { | |
757 | extra_flist_sending_enabled = False; | |
758 | tv.tv_sec = select_timeout; | |
759 | } | |
760 | } else | |
761 | tv.tv_sec = select_timeout; | |
762 | tv.tv_usec = 0; | |
763 | ||
764 | cnt = select(max_fd + 1, &r_fds, &w_fds, &e_fds, &tv); | |
765 | ||
766 | if (cnt <= 0) { | |
767 | if (cnt < 0 && errno == EBADF) { | |
768 | msgs2stderr = 1; | |
769 | exit_cleanup(RERR_SOCKETIO); | |
770 | } | |
771 | if (extra_flist_sending_enabled) { | |
772 | extra_flist_sending_enabled = False; | |
773 | send_extra_file_list(sock_f_out, -1); | |
774 | extra_flist_sending_enabled = !flist_eof; | |
775 | } else | |
776 | check_timeout((flags & PIO_NEED_INPUT) != 0, 0); | |
777 | FD_ZERO(&r_fds); /* Just in case... */ | |
778 | FD_ZERO(&w_fds); | |
779 | } | |
780 | ||
781 | if (iobuf.in_fd >= 0 && FD_ISSET(iobuf.in_fd, &r_fds)) { | |
782 | size_t len, pos = iobuf.in.pos + iobuf.in.len; | |
783 | ssize_t n; | |
784 | if (pos >= iobuf.in.size) { | |
785 | pos -= iobuf.in.size; | |
786 | len = iobuf.in.size - iobuf.in.len; | |
787 | } else | |
788 | len = iobuf.in.size - pos; | |
789 | if ((n = read(iobuf.in_fd, iobuf.in.buf + pos, len)) <= 0) { | |
790 | if (n == 0) { | |
791 | /* Signal that input has become invalid. */ | |
792 | if (!read_batch || batch_fd < 0 || am_generator) | |
793 | iobuf.in_fd = -2; | |
794 | batch_fd = -1; | |
795 | continue; | |
796 | } | |
797 | if (errno == EINTR || errno == EWOULDBLOCK || errno == EAGAIN) | |
798 | n = 0; | |
799 | else { | |
800 | /* Don't write errors on a dead socket. */ | |
801 | if (iobuf.in_fd == sock_f_in) { | |
802 | if (am_sender) | |
803 | msgs2stderr = 1; | |
804 | rsyserr(FERROR_SOCKET, errno, "read error"); | |
805 | } else | |
806 | rsyserr(FERROR, errno, "read error"); | |
807 | exit_cleanup(RERR_SOCKETIO); | |
808 | } | |
809 | } | |
810 | if (msgs2stderr == 1 && DEBUG_GTE(IO, 2)) { | |
811 | rprintf(FINFO, "[%s] recv=%" SIZE_T_FMT_MOD "d\n", | |
812 | who_am_i(), (SIZE_T_FMT_CAST)n); | |
813 | } | |
814 | ||
815 | if (io_timeout) { | |
816 | last_io_in = time(NULL); | |
817 | if (io_timeout && flags & PIO_NEED_INPUT) | |
818 | maybe_send_keepalive(last_io_in, 0); | |
819 | } | |
820 | stats.total_read += n; | |
821 | ||
822 | iobuf.in.len += n; | |
823 | } | |
824 | ||
825 | if (stop_at_utime && time(NULL) >= stop_at_utime) { | |
826 | rprintf(FERROR, "stopping at requested limit\n"); | |
827 | exit_cleanup(RERR_TIMEOUT); | |
828 | } | |
829 | ||
830 | if (out && FD_ISSET(iobuf.out_fd, &w_fds)) { | |
831 | size_t len = iobuf.raw_flushing_ends_before ? iobuf.raw_flushing_ends_before - out->pos : out->len; | |
832 | ssize_t n; | |
833 | ||
834 | if (bwlimit_writemax && len > bwlimit_writemax) | |
835 | len = bwlimit_writemax; | |
836 | ||
837 | if (out->pos + len > out->size) | |
838 | len = out->size - out->pos; | |
839 | if ((n = write(iobuf.out_fd, out->buf + out->pos, len)) <= 0) { | |
840 | if (errno == EINTR || errno == EWOULDBLOCK || errno == EAGAIN) | |
841 | n = 0; | |
842 | else { | |
843 | /* Don't write errors on a dead socket. */ | |
844 | msgs2stderr = 1; | |
845 | iobuf.out_fd = -2; | |
846 | iobuf.out.len = iobuf.msg.len = iobuf.raw_flushing_ends_before = 0; | |
847 | rsyserr(FERROR_SOCKET, errno, "write error"); | |
848 | drain_multiplex_messages(); | |
849 | exit_cleanup(RERR_SOCKETIO); | |
850 | } | |
851 | } | |
852 | if (msgs2stderr == 1 && DEBUG_GTE(IO, 2)) { | |
853 | rprintf(FINFO, "[%s] %s sent=%" SIZE_T_FMT_MOD "d\n", | |
854 | who_am_i(), out == &iobuf.out ? "out" : "msg", (SIZE_T_FMT_CAST)n); | |
855 | } | |
856 | ||
857 | if (io_timeout) | |
858 | last_io_out = time(NULL); | |
859 | stats.total_written += n; | |
860 | ||
861 | if (bwlimit_writemax) | |
862 | sleep_for_bwlimit(n); | |
863 | ||
864 | if ((out->pos += n) == out->size) { | |
865 | if (iobuf.raw_flushing_ends_before) | |
866 | iobuf.raw_flushing_ends_before -= out->size; | |
867 | out->pos = 0; | |
868 | restore_iobuf_size(out); | |
869 | } else if (out->pos == iobuf.raw_flushing_ends_before) | |
870 | iobuf.raw_flushing_ends_before = 0; | |
871 | if ((out->len -= n) == empty_buf_len) { | |
872 | out->pos = 0; | |
873 | restore_iobuf_size(out); | |
874 | if (empty_buf_len) | |
875 | iobuf.raw_data_header_pos = 0; | |
876 | } | |
877 | } | |
878 | ||
879 | if (got_kill_signal > 0) | |
880 | handle_kill_signal(True); | |
881 | ||
882 | /* We need to help prevent deadlock by doing what reading | |
883 | * we can whenever we are here trying to write. */ | |
884 | if (IN_MULTIPLEXED_AND_READY && !(flags & PIO_NEED_INPUT)) { | |
885 | while (!iobuf.raw_input_ends_before && iobuf.in.len > 512) | |
886 | read_a_msg(); | |
887 | if (flist_receiving_enabled && iobuf.in.len > 512) | |
888 | wait_for_receiver(); /* generator only */ | |
889 | } | |
890 | ||
891 | if (ff_forward_fd >= 0 && FD_ISSET(ff_forward_fd, &r_fds)) { | |
892 | /* This can potentially flush all output and enable | |
893 | * multiplexed output, so keep this last in the loop | |
894 | * and be sure to not cache anything that would break | |
895 | * such a change. */ | |
896 | forward_filesfrom_data(); | |
897 | } | |
898 | } | |
899 | double_break: | |
900 | ||
901 | if (got_kill_signal > 0) | |
902 | handle_kill_signal(True); | |
903 | ||
904 | data = iobuf.in.buf + iobuf.in.pos; | |
905 | ||
906 | if (flags & PIO_CONSUME_INPUT) { | |
907 | iobuf.in.len -= needed; | |
908 | iobuf.in.pos += needed; | |
909 | if (iobuf.in.pos == iobuf.raw_input_ends_before) | |
910 | iobuf.raw_input_ends_before = 0; | |
911 | if (iobuf.in.pos >= iobuf.in.size) { | |
912 | iobuf.in.pos -= iobuf.in.size; | |
913 | if (iobuf.raw_input_ends_before) | |
914 | iobuf.raw_input_ends_before -= iobuf.in.size; | |
915 | } | |
916 | } | |
917 | ||
918 | return data; | |
919 | } | |
920 | ||
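/* Wait for "len" bytes of input to be available (consuming them), then copy
 * them out of the circular input buffer into "buf".  If the span wraps past
 * the physical end of iobuf.in, the copy is done in two pieces. */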
921 | static void raw_read_buf(char *buf, size_t len) | |
922 | { | |
923 | size_t pos = iobuf.in.pos; | |
924 | char *data = perform_io(len, PIO_INPUT_AND_CONSUME); | |
925 | if (iobuf.in.pos <= pos && len) { | |
926 | size_t siz = len - iobuf.in.pos; | |
927 | memcpy(buf, data, siz); | |
928 | memcpy(buf + siz, iobuf.in.buf, iobuf.in.pos); | |
929 | } else | |
930 | memcpy(buf, data, len); | |
931 | } | |
932 | ||
933 | static int32 raw_read_int(void) | |
934 | { | |
935 | char *data, buf[4]; | |
936 | if (iobuf.in.size - iobuf.in.pos >= 4) | |
937 | data = perform_io(4, PIO_INPUT_AND_CONSUME); | |
938 | else | |
939 | raw_read_buf(data = buf, 4); | |
940 | return IVAL(data, 0); | |
941 | } | |
942 | ||
943 | void noop_io_until_death(void) | |
944 | { | |
945 | char buf[1024]; | |
946 | ||
947 | if (!iobuf.in.buf || !iobuf.out.buf || iobuf.in_fd < 0 || iobuf.out_fd < 0 || kluge_around_eof) | |
948 | return; | |
949 | ||
950 | /* If we're talking to a daemon over a socket, don't short-circuit this logic */ | |
951 | if (msgs2stderr && daemon_connection >= 0) | |
952 | return; | |
953 | ||
954 | kluge_around_eof = 2; | |
955 | /* Setting an I/O timeout ensures that if something inexplicably weird | |
956 | * happens, we won't hang around forever. */ | |
957 | if (!io_timeout) | |
958 | set_io_timeout(60); | |
959 | ||
960 | while (1) | |
961 | read_buf(iobuf.in_fd, buf, sizeof buf); | |
962 | } | |
963 | ||
964 | /* Buffer a message for the multiplexed output stream. Is not used for (normal) MSG_DATA. */ | |
965 | int send_msg(enum msgcode code, const char *buf, size_t len, int convert) | |
966 | { | |
967 | char *hdr; | |
968 | size_t needed, pos; | |
969 | BOOL want_debug = DEBUG_GTE(IO, 1) && convert >= 0 && (msgs2stderr == 1 || code != MSG_INFO); | |
970 | ||
971 | if (!OUT_MULTIPLEXED) | |
972 | return 0; | |
973 | ||
974 | if (want_debug) { | |
975 | rprintf(FINFO, "[%s] send_msg(%d, %" SIZE_T_FMT_MOD "d)\n", | |
976 | who_am_i(), (int)code, (SIZE_T_FMT_CAST)len); | |
977 | } | |
978 | ||
979 | /* When checking for enough free space for this message, we need to | |
980 | * make sure that there is space for the 4-byte header, plus we'll | |
981 | * assume that we may waste up to 3 bytes (if the header doesn't fit | |
982 | * at the physical end of the buffer). */ | |
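	/* For example (hypothetical sizes): a 100-byte message must find
	 * 100 + 4 + 3 = 107 bytes of room, while a to-be-converted message
	 * reserves 100*2 + 4 + 3 = 207 bytes to allow for iconv expansion. */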
983 | #ifdef ICONV_OPTION | |
984 | if (convert > 0 && ic_send == (iconv_t)-1) | |
985 | convert = 0; | |
986 | if (convert > 0) { | |
987 | /* Ensuring double-size room leaves space for maximal conversion expansion. */ | |
988 | needed = len*2 + 4 + 3; | |
989 | } else | |
990 | #endif | |
991 | needed = len + 4 + 3; | |
992 | if (iobuf.msg.len + needed > iobuf.msg.size) { | |
993 | if (am_sender) | |
994 | perform_io(needed, PIO_NEED_MSGROOM); | |
995 | else { /* We sometimes allow the iobuf.msg size to increase to avoid a deadlock. */ | |
996 | size_t old_size = iobuf.msg.size; | |
997 | restore_iobuf_size(&iobuf.msg); | |
998 | realloc_xbuf(&iobuf.msg, iobuf.msg.size * 2); | |
999 | if (iobuf.msg.pos + iobuf.msg.len > old_size) | |
1000 | memcpy(iobuf.msg.buf + old_size, iobuf.msg.buf, iobuf.msg.pos + iobuf.msg.len - old_size); | |
1001 | } | |
1002 | } | |
1003 | ||
1004 | pos = iobuf.msg.pos + iobuf.msg.len; /* Must be set after any flushing. */ | |
1005 | if (pos >= iobuf.msg.size) | |
1006 | pos -= iobuf.msg.size; | |
1007 | else if (pos + 4 > iobuf.msg.size) { | |
1008 | /* The 4-byte header won't fit at the end of the buffer, | |
1009 | * so we'll temporarily reduce the message buffer's size | |
1010 | * and put the header at the start of the buffer. */ | |
1011 | reduce_iobuf_size(&iobuf.msg, pos); | |
1012 | pos = 0; | |
1013 | } | |
1014 | hdr = iobuf.msg.buf + pos; | |
1015 | ||
1016 | iobuf.msg.len += 4; /* Allocate room for the coming header bytes. */ | |
1017 | ||
1018 | #ifdef ICONV_OPTION | |
1019 | if (convert > 0) { | |
1020 | xbuf inbuf; | |
1021 | ||
1022 | INIT_XBUF(inbuf, (char*)buf, len, (size_t)-1); | |
1023 | ||
1024 | len = iobuf.msg.len; | |
1025 | iconvbufs(ic_send, &inbuf, &iobuf.msg, | |
1026 | ICB_INCLUDE_BAD | ICB_INCLUDE_INCOMPLETE | ICB_CIRCULAR_OUT | ICB_INIT); | |
1027 | if (inbuf.len > 0) { | |
1028 | rprintf(FERROR, "overflowed iobuf.msg buffer in send_msg"); | |
1029 | exit_cleanup(RERR_UNSUPPORTED); | |
1030 | } | |
1031 | len = iobuf.msg.len - len; | |
1032 | } else | |
1033 | #endif | |
1034 | { | |
1035 | size_t siz; | |
1036 | ||
1037 | if ((pos += 4) == iobuf.msg.size) | |
1038 | pos = 0; | |
1039 | ||
1040 | /* Handle a split copy if we wrap around the end of the circular buffer. */ | |
1041 | if (pos >= iobuf.msg.pos && (siz = iobuf.msg.size - pos) < len) { | |
1042 | memcpy(iobuf.msg.buf + pos, buf, siz); | |
1043 | memcpy(iobuf.msg.buf, buf + siz, len - siz); | |
1044 | } else | |
1045 | memcpy(iobuf.msg.buf + pos, buf, len); | |
1046 | ||
1047 | iobuf.msg.len += len; | |
1048 | } | |
1049 | ||
1050 | SIVAL(hdr, 0, ((MPLEX_BASE + (int)code)<<24) + len); | |
1051 | ||
1052 | if (want_debug && convert > 0) { | |
1053 | rprintf(FINFO, "[%s] converted msg len=%" SIZE_T_FMT_MOD "d\n", | |
1054 | who_am_i(), (SIZE_T_FMT_CAST)len); | |
1055 | } | |
1056 | ||
1057 | return 1; | |
1058 | } | |
1059 | ||
1060 | void send_msg_int(enum msgcode code, int num) | |
1061 | { | |
1062 | char numbuf[4]; | |
1063 | ||
1064 | if (DEBUG_GTE(IO, 1)) | |
1065 | rprintf(FINFO, "[%s] send_msg_int(%d, %d)\n", who_am_i(), (int)code, num); | |
1066 | ||
1067 | SIVAL(numbuf, 0, num); | |
1068 | send_msg(code, numbuf, 4, -1); | |
1069 | } | |
1070 | ||
1071 | void send_msg_success(const char *fname, int num) | |
1072 | { | |
1073 | if (local_server) { | |
1074 | STRUCT_STAT st; | |
1075 | ||
1076 | if (DEBUG_GTE(IO, 1)) | |
1077 | rprintf(FINFO, "[%s] send_msg_success(%d)\n", who_am_i(), num); | |
1078 | ||
1079 | if (stat(fname, &st) < 0) | |
1080 | memset(&st, 0, sizeof (STRUCT_STAT)); | |
1081 | SIVAL(num_dev_ino_buf, 0, num); | |
1082 | SIVAL64(num_dev_ino_buf, 4, st.st_dev); | |
1083 | SIVAL64(num_dev_ino_buf, 4+8, st.st_ino); | |
1084 | send_msg(MSG_SUCCESS, num_dev_ino_buf, sizeof num_dev_ino_buf, -1); | |
1085 | } else | |
1086 | send_msg_int(MSG_SUCCESS, num); | |
1087 | } | |
1088 | ||
1089 | static void got_flist_entry_status(enum festatus status, int ndx) | |
1090 | { | |
1091 | struct file_list *flist = flist_for_ndx(ndx, "got_flist_entry_status"); | |
1092 | ||
1093 | if (remove_source_files) { | |
1094 | active_filecnt--; | |
1095 | active_bytecnt -= F_LENGTH(flist->files[ndx - flist->ndx_start]); | |
1096 | } | |
1097 | ||
1098 | if (inc_recurse) | |
1099 | flist->in_progress--; | |
1100 | ||
1101 | switch (status) { | |
1102 | case FES_SUCCESS: | |
1103 | if (remove_source_files) { | |
1104 | if (local_server) | |
1105 | send_msg(MSG_SUCCESS, num_dev_ino_buf, sizeof num_dev_ino_buf, -1); | |
1106 | else | |
1107 | send_msg_int(MSG_SUCCESS, ndx); | |
1108 | } | |
1109 | /* FALL THROUGH */ | |
1110 | case FES_NO_SEND: | |
1111 | #ifdef SUPPORT_HARD_LINKS | |
1112 | if (preserve_hard_links) { | |
1113 | struct file_struct *file = flist->files[ndx - flist->ndx_start]; | |
1114 | if (F_IS_HLINKED(file)) { | |
1115 | if (status == FES_NO_SEND) | |
1116 | flist_ndx_push(&hlink_list, -2); /* indicates a failure follows */ | |
1117 | flist_ndx_push(&hlink_list, ndx); | |
1118 | if (inc_recurse) | |
1119 | flist->in_progress++; | |
1120 | } | |
1121 | } | |
1122 | #endif | |
1123 | break; | |
1124 | case FES_REDO: | |
1125 | if (read_batch) { | |
1126 | if (inc_recurse) | |
1127 | flist->in_progress++; | |
1128 | break; | |
1129 | } | |
1130 | if (inc_recurse) | |
1131 | flist->to_redo++; | |
1132 | flist_ndx_push(&redo_list, ndx); | |
1133 | break; | |
1134 | } | |
1135 | } | |
1136 | ||
1137 | /* Note the fds used for the main socket (which might really be a pipe | |
1138 | * for a local transfer, but we can ignore that). */ | |
1139 | void io_set_sock_fds(int f_in, int f_out) | |
1140 | { | |
1141 | sock_f_in = f_in; | |
1142 | sock_f_out = f_out; | |
1143 | } | |
1144 | ||
1145 | void set_io_timeout(int secs) | |
1146 | { | |
1147 | io_timeout = secs; | |
1148 | allowed_lull = (io_timeout + 1) / 2; | |
1149 | ||
1150 | if (!io_timeout || allowed_lull > SELECT_TIMEOUT) | |
1151 | select_timeout = SELECT_TIMEOUT; | |
1152 | else | |
1153 | select_timeout = allowed_lull; | |
1154 | ||
1155 | if (read_batch) | |
1156 | allowed_lull = 0; | |
1157 | } | |
1158 | ||
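/* Examine an error message forwarded from the remote rsync and, when it looks
 * like a 2.6.3-or-older rsync rejecting a short-option string that includes
 * -d (e.g. a hypothetical "rsync: on remote machine: -avzd: unknown option"),
 * suggest the --old-d workaround. */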
1159 | static void check_for_d_option_error(const char *msg) | |
1160 | { | |
1161 | static char rsync263_opts[] = "BCDHIKLPRSTWabceghlnopqrtuvxz"; | |
1162 | char *colon; | |
1163 | int saw_d = 0; | |
1164 | ||
1165 | if (*msg != 'r' | |
1166 | || strncmp(msg, REMOTE_OPTION_ERROR, sizeof REMOTE_OPTION_ERROR - 1) != 0) | |
1167 | return; | |
1168 | ||
1169 | msg += sizeof REMOTE_OPTION_ERROR - 1; | |
1170 | if (*msg == '-' || (colon = strchr(msg, ':')) == NULL | |
1171 | || strncmp(colon, REMOTE_OPTION_ERROR2, sizeof REMOTE_OPTION_ERROR2 - 1) != 0) | |
1172 | return; | |
1173 | ||
1174 | for ( ; *msg != ':'; msg++) { | |
1175 | if (*msg == 'd') | |
1176 | saw_d = 1; | |
1177 | else if (*msg == 'e') | |
1178 | break; | |
1179 | else if (strchr(rsync263_opts, *msg) == NULL) | |
1180 | return; | |
1181 | } | |
1182 | ||
1183 | if (saw_d) { | |
1184 | rprintf(FWARNING, "*** Try using \"--old-d\" if remote rsync is <= 2.6.3 ***\n"); | |
1185 | } | |
1186 | } | |
1187 | ||
1188 | /* This is used by the generator to limit how many file transfers can | |
1189 | * be active at once when --remove-source-files is specified. Without | |
1190 | * this, sender-side deletions were mostly happening at the end. */ | |
1191 | void increment_active_files(int ndx, int itemizing, enum logcode code) | |
1192 | { | |
1193 | while (1) { | |
1194 | /* TODO: tune these limits? */ | |
1195 | int limit = active_bytecnt >= 128*1024 ? 10 : 50; | |
1196 | if (active_filecnt < limit) | |
1197 | break; | |
1198 | check_for_finished_files(itemizing, code, 0); | |
1199 | if (active_filecnt < limit) | |
1200 | break; | |
1201 | wait_for_receiver(); | |
1202 | } | |
1203 | ||
1204 | active_filecnt++; | |
1205 | active_bytecnt += F_LENGTH(cur_flist->files[ndx - cur_flist->ndx_start]); | |
1206 | } | |
1207 | ||
1208 | int get_redo_num(void) | |
1209 | { | |
1210 | return flist_ndx_pop(&redo_list); | |
1211 | } | |
1212 | ||
1213 | int get_hlink_num(void) | |
1214 | { | |
1215 | return flist_ndx_pop(&hlink_list); | |
1216 | } | |
1217 | ||
1218 | /* When we're the receiver and we have a local --files-from list of names | |
1219 | * that needs to be sent over the socket to the sender, we have to do two | |
1220 | * things at the same time: send the sender a list of what files we're | |
1221 | * processing and read the incoming file+info list from the sender. We do | |
1222 | * this by making recv_file_list() call forward_filesfrom_data(), which | |
1223 | * will ensure that we forward data to the sender until we get some data | |
1224 | * for recv_file_list() to use. */ | |
1225 | void start_filesfrom_forwarding(int fd) | |
1226 | { | |
1227 | if (protocol_version < 31 && OUT_MULTIPLEXED) { | |
1228 | /* Older protocols send the files-from data w/o packaging | |
1229 | * it in multiplexed I/O packets, so temporarily switch | |
1230 | * to buffered I/O to match this behavior. */ | |
1231 | iobuf.msg.pos = iobuf.msg.len = 0; /* Be extra sure no messages go out. */ | |
1232 | ff_reenable_multiplex = io_end_multiplex_out(MPLX_TO_BUFFERED); | |
1233 | } | |
1234 | ff_forward_fd = fd; | |
1235 | ||
1236 | alloc_xbuf(&ff_xb, FILESFROM_BUFLEN); | |
1237 | } | |
1238 | ||
1239 | /* Read a line into the "buf" buffer. */ | |
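/* The flags adjust the parsing: RL_EOL_NULLS makes '\0' the line terminator
 * instead of CR/LF, RL_DUMP_COMMENTS skips blank lines and lines that start
 * with '#' or ';', and RL_CONVERT (when iconv is enabled) runs each line
 * through the ic_recv character-set conversion. */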
1240 | int read_line(int fd, char *buf, size_t bufsiz, int flags) | |
1241 | { | |
1242 | char ch, *s, *eob; | |
1243 | ||
1244 | #ifdef ICONV_OPTION | |
1245 | if (flags & RL_CONVERT && iconv_buf.size < bufsiz) | |
1246 | realloc_xbuf(&iconv_buf, ROUND_UP_1024(bufsiz) + 1024); | |
1247 | #endif | |
1248 | ||
1249 | start: | |
1250 | #ifdef ICONV_OPTION | |
1251 | s = flags & RL_CONVERT ? iconv_buf.buf : buf; | |
1252 | #else | |
1253 | s = buf; | |
1254 | #endif | |
1255 | eob = s + bufsiz - 1; | |
1256 | while (1) { | |
1257 | /* We avoid read_byte() for files because files can return an EOF. */ | |
1258 | if (fd == iobuf.in_fd) | |
1259 | ch = read_byte(fd); | |
1260 | else if (safe_read(fd, &ch, 1) == 0) | |
1261 | break; | |
1262 | if (flags & RL_EOL_NULLS ? ch == '\0' : (ch == '\r' || ch == '\n')) { | |
1263 | /* Skip empty lines if dumping comments. */ | |
1264 | if (flags & RL_DUMP_COMMENTS && s == buf) | |
1265 | continue; | |
1266 | break; | |
1267 | } | |
1268 | if (s < eob) | |
1269 | *s++ = ch; | |
1270 | } | |
1271 | *s = '\0'; | |
1272 | ||
1273 | if (flags & RL_DUMP_COMMENTS && (*buf == '#' || *buf == ';')) | |
1274 | goto start; | |
1275 | ||
1276 | #ifdef ICONV_OPTION | |
1277 | if (flags & RL_CONVERT) { | |
1278 | xbuf outbuf; | |
1279 | INIT_XBUF(outbuf, buf, 0, bufsiz); | |
1280 | iconv_buf.pos = 0; | |
1281 | iconv_buf.len = s - iconv_buf.buf; | |
1282 | iconvbufs(ic_recv, &iconv_buf, &outbuf, | |
1283 | ICB_INCLUDE_BAD | ICB_INCLUDE_INCOMPLETE | ICB_INIT); | |
1284 | outbuf.buf[outbuf.len] = '\0'; | |
1285 | return outbuf.len; | |
1286 | } | |
1287 | #endif | |
1288 | ||
1289 | return s - buf; | |
1290 | } | |
1291 | ||
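/* Read the argument list sent by the client, one arg per line (with '\0' line
 * endings when rl_nulls is set), stopping at an empty line.  Once a "." arg
 * has been seen, the remaining args are glob-expanded (via glob_expand_module()
 * when a module name was given) and are also accumulated into *request_p for
 * logging. */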
1292 | void read_args(int f_in, char *mod_name, char *buf, size_t bufsiz, int rl_nulls, | |
1293 | char ***argv_p, int *argc_p, char **request_p) | |
1294 | { | |
1295 | int maxargs = MAX_ARGS; | |
1296 | int dot_pos = 0, argc = 0, request_len = 0; | |
1297 | char **argv, *p; | |
1298 | int rl_flags = (rl_nulls ? RL_EOL_NULLS : 0); | |
1299 | ||
1300 | #ifdef ICONV_OPTION | |
1301 | rl_flags |= (protect_args && ic_recv != (iconv_t)-1 ? RL_CONVERT : 0); | |
1302 | #endif | |
1303 | ||
1304 | argv = new_array(char *, maxargs); | |
1305 | if (mod_name && !protect_args) | |
1306 | argv[argc++] = "rsyncd"; | |
1307 | ||
1308 | if (request_p) | |
1309 | *request_p = NULL; | |
1310 | ||
1311 | while (1) { | |
1312 | if (read_line(f_in, buf, bufsiz, rl_flags) == 0) | |
1313 | break; | |
1314 | ||
1315 | if (argc == maxargs-1) { | |
1316 | maxargs += MAX_ARGS; | |
1317 | argv = realloc_array(argv, char *, maxargs); | |
1318 | } | |
1319 | ||
1320 | if (dot_pos) { | |
1321 | if (request_p && request_len < 1024) { | |
1322 | int len = strlen(buf); | |
1323 | if (request_len) | |
1324 | request_p[0][request_len++] = ' '; | |
1325 | *request_p = realloc_array(*request_p, char, request_len + len + 1); | |
1326 | memcpy(*request_p + request_len, buf, len + 1); | |
1327 | request_len += len; | |
1328 | } | |
1329 | if (mod_name) | |
1330 | glob_expand_module(mod_name, buf, &argv, &argc, &maxargs); | |
1331 | else | |
1332 | glob_expand(buf, &argv, &argc, &maxargs); | |
1333 | } else { | |
1334 | p = strdup(buf); | |
1335 | argv[argc++] = p; | |
1336 | if (*p == '.' && p[1] == '\0') | |
1337 | dot_pos = argc; | |
1338 | } | |
1339 | } | |
1340 | argv[argc] = NULL; | |
1341 | ||
1342 | glob_expand(NULL, NULL, NULL, NULL); | |
1343 | ||
1344 | *argc_p = argc; | |
1345 | *argv_p = argv; | |
1346 | } | |
1347 | ||
1348 | BOOL io_start_buffering_out(int f_out) | |
1349 | { | |
1350 | if (msgs2stderr == 1 && DEBUG_GTE(IO, 2)) | |
1351 | rprintf(FINFO, "[%s] io_start_buffering_out(%d)\n", who_am_i(), f_out); | |
1352 | ||
1353 | if (iobuf.out.buf) { | |
1354 | if (iobuf.out_fd == -1) | |
1355 | iobuf.out_fd = f_out; | |
1356 | else | |
1357 | assert(f_out == iobuf.out_fd); | |
1358 | return False; | |
1359 | } | |
1360 | ||
1361 | alloc_xbuf(&iobuf.out, ROUND_UP_1024(IO_BUFFER_SIZE * 2)); | |
1362 | iobuf.out_fd = f_out; | |
1363 | ||
1364 | return True; | |
1365 | } | |
1366 | ||
1367 | BOOL io_start_buffering_in(int f_in) | |
1368 | { | |
1369 | if (msgs2stderr == 1 && DEBUG_GTE(IO, 2)) | |
1370 | rprintf(FINFO, "[%s] io_start_buffering_in(%d)\n", who_am_i(), f_in); | |
1371 | ||
1372 | if (iobuf.in.buf) { | |
1373 | if (iobuf.in_fd == -1) | |
1374 | iobuf.in_fd = f_in; | |
1375 | else | |
1376 | assert(f_in == iobuf.in_fd); | |
1377 | return False; | |
1378 | } | |
1379 | ||
1380 | alloc_xbuf(&iobuf.in, ROUND_UP_1024(IO_BUFFER_SIZE)); | |
1381 | iobuf.in_fd = f_in; | |
1382 | ||
1383 | return True; | |
1384 | } | |
1385 | ||
1386 | void io_end_buffering_in(BOOL free_buffers) | |
1387 | { | |
1388 | if (msgs2stderr == 1 && DEBUG_GTE(IO, 2)) { | |
1389 | rprintf(FINFO, "[%s] io_end_buffering_in(IOBUF_%s_BUFS)\n", | |
1390 | who_am_i(), free_buffers ? "FREE" : "KEEP"); | |
1391 | } | |
1392 | ||
1393 | if (free_buffers) | |
1394 | free_xbuf(&iobuf.in); | |
1395 | else | |
1396 | iobuf.in.pos = iobuf.in.len = 0; | |
1397 | ||
1398 | iobuf.in_fd = -1; | |
1399 | } | |
1400 | ||
1401 | void io_end_buffering_out(BOOL free_buffers) | |
1402 | { | |
1403 | if (msgs2stderr == 1 && DEBUG_GTE(IO, 2)) { | |
1404 | rprintf(FINFO, "[%s] io_end_buffering_out(IOBUF_%s_BUFS)\n", | |
1405 | who_am_i(), free_buffers ? "FREE" : "KEEP"); | |
1406 | } | |
1407 | ||
1408 | io_flush(FULL_FLUSH); | |
1409 | ||
1410 | if (free_buffers) { | |
1411 | free_xbuf(&iobuf.out); | |
1412 | free_xbuf(&iobuf.msg); | |
1413 | } | |
1414 | ||
1415 | iobuf.out_fd = -1; | |
1416 | } | |
1417 | ||
1418 | void maybe_flush_socket(int important) | |
1419 | { | |
1420 | if (flist_eof && iobuf.out.buf && iobuf.out.len > iobuf.out_empty_len | |
1421 | && (important || time(NULL) - last_io_out >= 5)) | |
1422 | io_flush(NORMAL_FLUSH); | |
1423 | } | |
1424 | ||
1425 | /* Older rsync versions used to send either a MSG_NOOP (protocol 30) or a | |
1426 | * raw-data-based keep-alive (protocol 29), both of which implied forwarding of | |
1427 | * the message through the sender. Since the new timeout method does not need | |
1428 | * any forwarding, we just send an empty MSG_DATA message, which works with all | |
1429 | * rsync versions. This avoids any message forwarding, and leaves the raw-data | |
1430 | * stream alone (since we can never be quite sure if that stream is in the | |
1431 | * right state for a keep-alive message). */ | |
1432 | void maybe_send_keepalive(time_t now, int flags) | |
1433 | { | |
1434 | if (flags & MSK_ACTIVE_RECEIVER) | |
1435 | last_io_in = now; /* Fudge things when we're working hard on the files. */ | |
1436 | ||
1437 | /* Early in the transfer (before the receiver forks) the receiving side doesn't | |
1438 | * care if it hasn't sent data in a while as long as it is receiving data (in | |
1439 | * fact, a pre-3.1.0 rsync would die if we tried to send it a keep alive during | |
1440 | * this time). So, if we're an early-receiving proc, just return and let the | |
1441 | * incoming data determine if we timeout. */ | |
1442 | if (!am_sender && !am_receiver && !am_generator) | |
1443 | return; | |
1444 | ||
1445 | if (now - last_io_out >= allowed_lull) { | |
1446 | /* The receiver is special: it only sends keep-alive messages if it is | |
1447 | * actively receiving data. Otherwise, it lets the generator timeout. */ | |
1448 | if (am_receiver && now - last_io_in >= io_timeout) | |
1449 | return; | |
1450 | ||
1451 | if (!iobuf.msg.len && iobuf.out.len == iobuf.out_empty_len) | |
1452 | send_msg(MSG_DATA, "", 0, 0); | |
1453 | if (!(flags & MSK_ALLOW_FLUSH)) { | |
1454 | /* Let the caller worry about writing out the data. */ | |
1455 | } else if (iobuf.msg.len) | |
1456 | perform_io(iobuf.msg.size - iobuf.msg.len + 1, PIO_NEED_MSGROOM); | |
1457 | else if (iobuf.out.len > iobuf.out_empty_len) | |
1458 | io_flush(NORMAL_FLUSH); | |
1459 | } | |
1460 | } | |
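
/* In concrete terms, the keep-alive queued above by send_msg(MSG_DATA, "", 0, 0)
 * is nothing but a bare 4-byte multiplex header with a zero length field, so
 * the peer's read_a_msg() absorbs it without touching the raw-data stream. */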
1461 | ||
1462 | void start_flist_forward(int ndx) | |
1463 | { | |
1464 | write_int(iobuf.out_fd, ndx); | |
1465 | forward_flist_data = 1; | |
1466 | } | |
1467 | ||
1468 | void stop_flist_forward(void) | |
1469 | { | |
1470 | forward_flist_data = 0; | |
1471 | } | |
1472 | ||
1473 | /* Read a message from a multiplexed source. */ | |
1474 | static void read_a_msg(void) | |
1475 | { | |
1476 | char data[BIGPATHBUFLEN]; | |
1477 | int tag, val; | |
1478 | size_t msg_bytes; | |
1479 | ||
1480 | /* This ensures that perform_io() does not try to do any message reading | |
1481 | * until we've read all of the data for this message. We should also | |
1482 | * try to avoid calling things that will cause data to be written via | |
1483 | * perform_io() prior to this being reset to 1. */ | |
1484 | iobuf.in_multiplexed = -1; | |
1485 | ||
1486 | tag = raw_read_int(); | |
1487 | ||
1488 | msg_bytes = tag & 0xFFFFFF; | |
1489 | tag = (tag >> 24) - MPLEX_BASE; | |
1490 | ||
1491 | if (msgs2stderr == 1 && DEBUG_GTE(IO, 1)) { | |
1492 | rprintf(FINFO, "[%s] got msg=%d, len=%" SIZE_T_FMT_MOD "d\n", | |
1493 | who_am_i(), (int)tag, (SIZE_T_FMT_CAST)msg_bytes); | |
1494 | } | |
1495 | ||
1496 | switch (tag) { | |
1497 | case MSG_DATA: | |
1498 | assert(iobuf.raw_input_ends_before == 0); | |
1499 | /* Though this does not yet read the data, we do mark where in | |
1500 | * the buffer the msg data will end once it is read. It is | |
1501 | * possible that this points off the end of the buffer, in | |
1502 | * which case the gradual reading of the input stream will | |
1503 | * cause this value to wrap around and eventually become real. */ | |
1504 | if (msg_bytes) | |
1505 | iobuf.raw_input_ends_before = iobuf.in.pos + msg_bytes; | |
1506 | iobuf.in_multiplexed = 1; | |
1507 | break; | |
1508 | case MSG_STATS: | |
1509 | if (msg_bytes != sizeof stats.total_read || !am_generator) | |
1510 | goto invalid_msg; | |
1511 | raw_read_buf((char*)&stats.total_read, sizeof stats.total_read); | |
1512 | iobuf.in_multiplexed = 1; | |
1513 | break; | |
1514 | case MSG_REDO: | |
1515 | if (msg_bytes != 4 || !am_generator) | |
1516 | goto invalid_msg; | |
1517 | val = raw_read_int(); | |
1518 | iobuf.in_multiplexed = 1; | |
1519 | got_flist_entry_status(FES_REDO, val); | |
1520 | break; | |
1521 | case MSG_IO_ERROR: | |
1522 | if (msg_bytes != 4) | |
1523 | goto invalid_msg; | |
1524 | val = raw_read_int(); | |
1525 | iobuf.in_multiplexed = 1; | |
1526 | io_error |= val; | |
1527 | if (am_receiver) | |
1528 | send_msg_int(MSG_IO_ERROR, val); | |
1529 | break; | |
1530 | case MSG_IO_TIMEOUT: | |
1531 | if (msg_bytes != 4 || am_server || am_generator) | |
1532 | goto invalid_msg; | |
1533 | val = raw_read_int(); | |
1534 | iobuf.in_multiplexed = 1; | |
1535 | if (!io_timeout || io_timeout > val) { | |
1536 | if (INFO_GTE(MISC, 2)) | |
1537 | rprintf(FINFO, "Setting --timeout=%d to match server\n", val); | |
1538 | set_io_timeout(val); | |
1539 | } | |
1540 | break; | |
1541 | case MSG_NOOP: | |
1542 | /* Support protocol-30 keep-alive method. */ | |
1543 | if (msg_bytes != 0) | |
1544 | goto invalid_msg; | |
1545 | iobuf.in_multiplexed = 1; | |
1546 | if (am_sender) | |
1547 | maybe_send_keepalive(time(NULL), MSK_ALLOW_FLUSH); | |
1548 | break; | |
1549 | case MSG_DELETED: | |
1550 | if (msg_bytes >= sizeof data) | |
1551 | goto overflow; | |
1552 | if (am_generator) { | |
1553 | raw_read_buf(data, msg_bytes); | |
1554 | iobuf.in_multiplexed = 1; | |
1555 | send_msg(MSG_DELETED, data, msg_bytes, 1); | |
1556 | break; | |
1557 | } | |
1558 | #ifdef ICONV_OPTION | |
1559 | if (ic_recv != (iconv_t)-1) { | |
1560 | xbuf outbuf, inbuf; | |
1561 | char ibuf[512]; | |
1562 | int add_null = 0; | |
1563 | int flags = ICB_INCLUDE_BAD | ICB_INIT; | |
1564 | ||
1565 | INIT_CONST_XBUF(outbuf, data); | |
1566 | INIT_XBUF(inbuf, ibuf, 0, (size_t)-1); | |
1567 | ||
1568 | while (msg_bytes) { | |
1569 | size_t len = msg_bytes > sizeof ibuf - inbuf.len ? sizeof ibuf - inbuf.len : msg_bytes; | |
1570 | raw_read_buf(ibuf + inbuf.len, len); | |
1571 | inbuf.pos = 0; | |
1572 | inbuf.len += len; | |
1573 | if (!(msg_bytes -= len) && !ibuf[inbuf.len-1]) | |
1574 | inbuf.len--, add_null = 1; | |
1575 | if (iconvbufs(ic_send, &inbuf, &outbuf, flags) < 0) { | |
1576 | if (errno == E2BIG) | |
1577 | goto overflow; | |
1578 | /* Buffer ended with an incomplete char, so move the | |
1579 | * bytes to the start of the buffer and continue. */ | |
1580 | memmove(ibuf, ibuf + inbuf.pos, inbuf.len); | |
1581 | } | |
1582 | flags &= ~ICB_INIT; | |
1583 | } | |
1584 | if (add_null) { | |
1585 | if (outbuf.len == outbuf.size) | |
1586 | goto overflow; | |
1587 | outbuf.buf[outbuf.len++] = '\0'; | |
1588 | } | |
1589 | msg_bytes = outbuf.len; | |
1590 | } else | |
1591 | #endif | |
1592 | raw_read_buf(data, msg_bytes); | |
1593 | iobuf.in_multiplexed = 1; | |
1594 | /* A directory name was sent with the trailing null */ | |
1595 | if (msg_bytes > 0 && !data[msg_bytes-1]) | |
1596 | log_delete(data, S_IFDIR); | |
1597 | else { | |
1598 | data[msg_bytes] = '\0'; | |
1599 | log_delete(data, S_IFREG); | |
1600 | } | |
1601 | break; | |
1602 | case MSG_SUCCESS: | |
1603 | if (msg_bytes != (local_server ? 4+8+8 : 4)) { | |
1604 | invalid_msg: | |
1605 | rprintf(FERROR, "invalid multi-message %d:%lu [%s%s]\n", | |
1606 | tag, (unsigned long)msg_bytes, who_am_i(), | |
1607 | inc_recurse ? "/inc" : ""); | |
1608 | exit_cleanup(RERR_STREAMIO); | |
1609 | } | |
1610 | raw_read_buf(num_dev_ino_buf, msg_bytes); | |
1611 | val = IVAL(num_dev_ino_buf, 0); | |
1612 | iobuf.in_multiplexed = 1; | |
1613 | if (am_generator) | |
1614 | got_flist_entry_status(FES_SUCCESS, val); | |
1615 | else | |
1616 | successful_send(val); | |
1617 | break; | |
1618 | case MSG_NO_SEND: | |
1619 | if (msg_bytes != 4) | |
1620 | goto invalid_msg; | |
1621 | val = raw_read_int(); | |
1622 | iobuf.in_multiplexed = 1; | |
1623 | if (am_generator) | |
1624 | got_flist_entry_status(FES_NO_SEND, val); | |
1625 | else | |
1626 | send_msg_int(MSG_NO_SEND, val); | |
1627 | break; | |
1628 | case MSG_ERROR_SOCKET: | |
1629 | case MSG_ERROR_UTF8: | |
1630 | case MSG_CLIENT: | |
1631 | case MSG_LOG: | |
1632 | if (!am_generator) | |
1633 | goto invalid_msg; | |
1634 | if (tag == MSG_ERROR_SOCKET) | |
1635 | msgs2stderr = 1; | |
1636 | /* FALL THROUGH */ | |
1637 | case MSG_INFO: | |
1638 | case MSG_ERROR: | |
1639 | case MSG_ERROR_XFER: | |
1640 | case MSG_WARNING: | |
1641 | if (msg_bytes >= sizeof data) { | |
1642 | overflow: | |
1643 | rprintf(FERROR, | |
1644 | "multiplexing overflow %d:%lu [%s%s]\n", | |
1645 | tag, (unsigned long)msg_bytes, who_am_i(), | |
1646 | inc_recurse ? "/inc" : ""); | |
1647 | exit_cleanup(RERR_STREAMIO); | |
1648 | } | |
1649 | raw_read_buf(data, msg_bytes); | |
1650 | /* We don't set in_multiplexed value back to 1 before writing this message | |
1651 | * because the write might loop back and read yet another message, over and | |
1652 | * over again, while waiting for room to put the message in the msg buffer. */ | |
1653 | rwrite((enum logcode)tag, data, msg_bytes, !am_generator); | |
1654 | iobuf.in_multiplexed = 1; | |
1655 | if (first_message) { | |
1656 | if (list_only && !am_sender && tag == 1 && msg_bytes < sizeof data) { | |
1657 | data[msg_bytes] = '\0'; | |
1658 | check_for_d_option_error(data); | |
1659 | } | |
1660 | first_message = 0; | |
1661 | } | |
1662 | break; | |
1663 | case MSG_ERROR_EXIT: | |
1664 | if (msg_bytes == 4) | |
1665 | val = raw_read_int(); | |
1666 | else if (msg_bytes == 0) | |
1667 | val = 0; | |
1668 | else | |
1669 | goto invalid_msg; | |
1670 | iobuf.in_multiplexed = 1; | |
1671 | if (DEBUG_GTE(EXIT, 3)) { | |
1672 | rprintf(FINFO, "[%s] got MSG_ERROR_EXIT with %" SIZE_T_FMT_MOD "d bytes\n", | |
1673 | who_am_i(), (SIZE_T_FMT_CAST)msg_bytes); | |
1674 | } | |
1675 | if (msg_bytes == 0) { | |
1676 | if (!am_sender && !am_generator) { | |
1677 | if (DEBUG_GTE(EXIT, 3)) { | |
1678 | rprintf(FINFO, "[%s] sending MSG_ERROR_EXIT (len 0)\n", | |
1679 | who_am_i()); | |
1680 | } | |
1681 | send_msg(MSG_ERROR_EXIT, "", 0, 0); | |
1682 | io_flush(FULL_FLUSH); | |
1683 | } | |
1684 | } else if (protocol_version >= 31) { | |
1685 | if (am_generator || am_receiver) { | |
1686 | if (DEBUG_GTE(EXIT, 3)) { | |
1687 | rprintf(FINFO, "[%s] sending MSG_ERROR_EXIT with exit_code %d\n", | |
1688 | who_am_i(), val); | |
1689 | } | |
1690 | send_msg_int(MSG_ERROR_EXIT, val); | |
1691 | } else { | |
1692 | if (DEBUG_GTE(EXIT, 3)) { | |
1693 | rprintf(FINFO, "[%s] sending MSG_ERROR_EXIT (len 0)\n", | |
1694 | who_am_i()); | |
1695 | } | |
1696 | send_msg(MSG_ERROR_EXIT, "", 0, 0); | |
1697 | } | |
1698 | } | |
1699 | /* Send a negative linenum so that we don't end up | |
1700 | * with a duplicate exit message. */ | |
1701 | _exit_cleanup(val, __FILE__, 0 - __LINE__); | |
1702 | default: | |
1703 | rprintf(FERROR, "unexpected tag %d [%s%s]\n", | |
1704 | tag, who_am_i(), inc_recurse ? "/inc" : ""); | |
1705 | exit_cleanup(RERR_STREAMIO); | |
1706 | } | |
1707 | ||
1708 | assert(iobuf.in_multiplexed > 0); | |
1709 | } | |
1710 | ||
1711 | static void drain_multiplex_messages(void) | |
1712 | { | |
1713 | while (IN_MULTIPLEXED_AND_READY && iobuf.in.len) { | |
1714 | if (iobuf.raw_input_ends_before) { | |
1715 | size_t raw_len = iobuf.raw_input_ends_before - iobuf.in.pos; | |
1716 | iobuf.raw_input_ends_before = 0; | |
1717 | if (raw_len >= iobuf.in.len) { | |
1718 | iobuf.in.len = 0; | |
1719 | break; | |
1720 | } | |
1721 | iobuf.in.len -= raw_len; | |
1722 | if ((iobuf.in.pos += raw_len) >= iobuf.in.size) | |
1723 | iobuf.in.pos -= iobuf.in.size; | |
1724 | } | |
1725 | read_a_msg(); | |
1726 | } | |
1727 | } | |
1728 | ||
1729 | void wait_for_receiver(void) | |
1730 | { | |
1731 | if (!iobuf.raw_input_ends_before) | |
1732 | read_a_msg(); | |
1733 | ||
1734 | if (iobuf.raw_input_ends_before) { | |
1735 | int ndx = read_int(iobuf.in_fd); | |
1736 | if (ndx < 0) { | |
1737 | switch (ndx) { | |
1738 | case NDX_FLIST_EOF: | |
1739 | flist_eof = 1; | |
1740 | if (DEBUG_GTE(FLIST, 3)) | |
1741 | rprintf(FINFO, "[%s] flist_eof=1\n", who_am_i()); | |
1742 | break; | |
1743 | case NDX_DONE: | |
1744 | msgdone_cnt++; | |
1745 | break; | |
1746 | default: | |
1747 | exit_cleanup(RERR_STREAMIO); | |
1748 | } | |
1749 | } else { | |
1750 | struct file_list *flist; | |
1751 | flist_receiving_enabled = False; | |
1752 | if (DEBUG_GTE(FLIST, 2)) { | |
1753 | rprintf(FINFO, "[%s] receiving flist for dir %d\n", | |
1754 | who_am_i(), ndx); | |
1755 | } | |
1756 | flist = recv_file_list(iobuf.in_fd, ndx); | |
1757 | flist->parent_ndx = ndx; | |
1758 | #ifdef SUPPORT_HARD_LINKS | |
1759 | if (preserve_hard_links) | |
1760 | match_hard_links(flist); | |
1761 | #endif | |
1762 | flist_receiving_enabled = True; | |
1763 | } | |
1764 | } | |
1765 | } | |
1766 | ||
1767 | unsigned short read_shortint(int f) | |
1768 | { | |
1769 | char b[2]; | |
1770 | read_buf(f, b, 2); | |
1771 | return (UVAL(b, 1) << 8) + UVAL(b, 0); | |
1772 | } | |
1773 | ||
1774 | int32 read_int(int f) | |
1775 | { | |
1776 | char b[4]; | |
1777 | int32 num; | |
1778 | ||
1779 | read_buf(f, b, 4); | |
1780 | num = IVAL(b, 0); | |
1781 | #if SIZEOF_INT32 > 4 | |
1782 | if (num & (int32)0x80000000) | |
1783 | num |= ~(int32)0xffffffff; | |
1784 | #endif | |
1785 | return num; | |
1786 | } | |
1787 | ||
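/* Illustrative demo (not part of the build): the wire format used by
 * read_int()/write_int() is little-endian, so 0x12345678 travels as the byte
 * sequence 78 56 34 12.  SIVAL/IVAL are the same macros the real code uses. */
#if 0
static void int_wire_format_demo(void)
{
	char b[4];
	SIVAL(b, 0, 0x12345678);		/* stores bytes 78 56 34 12 */
	assert(IVAL(b, 0) == 0x12345678);	/* and IVAL reassembles them */
}
#endif
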
1788 | uint32 read_uint(int f) | |
1789 | { | |
1790 | char b[4]; | |
1791 | read_buf(f, b, 4); | |
1792 | return IVAL(b, 0); | |
1793 | } | |
1794 | ||
1795 | int32 read_varint(int f) | |
1796 | { | |
1797 | union { | |
1798 | char b[5]; | |
1799 | int32 x; | |
1800 | } u; | |
1801 | uchar ch; | |
1802 | int extra; | |
1803 | ||
1804 | u.x = 0; | |
1805 | ch = read_byte(f); | |
1806 | extra = int_byte_extra[ch / 4]; | |
1807 | if (extra) { | |
1808 | uchar bit = ((uchar)1<<(8-extra)); | |
1809 | if (extra >= (int)sizeof u.b) { | |
1810 | rprintf(FERROR, "Overflow in read_varint()\n"); | |
1811 | exit_cleanup(RERR_STREAMIO); | |
1812 | } | |
1813 | read_buf(f, u.b, extra); | |
1814 | u.b[extra] = ch & (bit-1); | |
1815 | } else | |
1816 | u.b[0] = ch; | |
1817 | #if CAREFUL_ALIGNMENT | |
1818 | u.x = IVAL(u.b,0); | |
1819 | #endif | |
1820 | #if SIZEOF_INT32 > 4 | |
1821 | if (u.x & (int32)0x80000000) | |
1822 | u.x |= ~(int32)0xffffffff; | |
1823 | #endif | |
1824 | return u.x; | |
1825 | } | |
1826 | ||
1827 | int64 read_varlong(int f, uchar min_bytes) | |
1828 | { | |
1829 | union { | |
1830 | char b[9]; | |
1831 | int64 x; | |
1832 | } u; | |
1833 | char b2[8]; | |
1834 | int extra; | |
1835 | ||
1836 | #if SIZEOF_INT64 < 8 | |
1837 | memset(u.b, 0, 8); | |
1838 | #else | |
1839 | u.x = 0; | |
1840 | #endif | |
1841 | read_buf(f, b2, min_bytes); | |
1842 | memcpy(u.b, b2+1, min_bytes-1); | |
1843 | extra = int_byte_extra[CVAL(b2, 0) / 4]; | |
1844 | if (extra) { | |
1845 | uchar bit = ((uchar)1<<(8-extra)); | |
1846 | if (min_bytes + extra > (int)sizeof u.b) { | |
1847 | rprintf(FERROR, "Overflow in read_varlong()\n"); | |
1848 | exit_cleanup(RERR_STREAMIO); | |
1849 | } | |
1850 | read_buf(f, u.b + min_bytes - 1, extra); | |
1851 | u.b[min_bytes + extra - 1] = CVAL(b2, 0) & (bit-1); | |
1852 | #if SIZEOF_INT64 < 8 | |
1853 | if (min_bytes + extra > 5 || u.b[4] || CVAL(u.b,3) & 0x80) { | |
1854 | rprintf(FERROR, "Integer overflow: attempted 64-bit offset\n"); | |
1855 | exit_cleanup(RERR_UNSUPPORTED); | |
1856 | } | |
1857 | #endif | |
1858 | } else | |
1859 | u.b[min_bytes + extra - 1] = CVAL(b2, 0); | |
1860 | #if SIZEOF_INT64 < 8 | |
1861 | u.x = IVAL(u.b,0); | |
1862 | #elif CAREFUL_ALIGNMENT | |
1863 | u.x = IVAL64(u.b,0); | |
1864 | #endif | |
1865 | return u.x; | |
1866 | } | |
1867 | ||
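/* Format note (a sketch of the scheme, not new behavior): read_varlong() above
 * and write_varlong() below always exchange at least min_bytes bytes.  The
 * first byte acts like a varint lead byte: its high bits announce how many
 * bytes follow beyond the guaranteed minimum, and its remaining low bits
 * supply the most significant byte of the value. */
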
1868 | int64 read_longint(int f) | |
1869 | { | |
1870 | #if SIZEOF_INT64 >= 8 | |
1871 | char b[9]; | |
1872 | #endif | |
1873 | int32 num = read_int(f); | |
1874 | ||
1875 | if (num != (int32)0xffffffff) | |
1876 | return num; | |
1877 | ||
1878 | #if SIZEOF_INT64 < 8 | |
1879 | rprintf(FERROR, "Integer overflow: attempted 64-bit offset\n"); | |
1880 | exit_cleanup(RERR_UNSUPPORTED); | |
1881 | #else | |
1882 | read_buf(f, b, 8); | |
1883 | return IVAL(b,0) | (((int64)IVAL(b,4))<<32); | |
1884 | #endif | |
1885 | } | |
1886 | ||
1887 | /* Debugging note: this will be named read_buf_() when using an external zlib. */ | |
1888 | void read_buf(int f, char *buf, size_t len) | |
1889 | { | |
1890 | if (f != iobuf.in_fd) { | |
1891 | if (safe_read(f, buf, len) != len) | |
1892 | whine_about_eof(False); /* Doesn't return. */ | |
1893 | goto batch_copy; | |
1894 | } | |
1895 | ||
1896 | if (!IN_MULTIPLEXED) { | |
1897 | raw_read_buf(buf, len); | |
1898 | total_data_read += len; | |
1899 | if (forward_flist_data) | |
1900 | write_buf(iobuf.out_fd, buf, len); | |
1901 | batch_copy: | |
1902 | if (f == write_batch_monitor_in) | |
1903 | safe_write(batch_fd, buf, len); | |
1904 | return; | |
1905 | } | |
1906 | ||
1907 | while (1) { | |
1908 | size_t siz; | |
1909 | ||
1910 | while (!iobuf.raw_input_ends_before) | |
1911 | read_a_msg(); | |
1912 | ||
1913 | siz = MIN(len, iobuf.raw_input_ends_before - iobuf.in.pos); | |
1914 | if (siz >= iobuf.in.size) | |
1915 | siz = iobuf.in.size; | |
1916 | raw_read_buf(buf, siz); | |
1917 | total_data_read += siz; | |
1918 | ||
1919 | if (forward_flist_data) | |
1920 | write_buf(iobuf.out_fd, buf, siz); | |
1921 | ||
1922 | if (f == write_batch_monitor_in) | |
1923 | safe_write(batch_fd, buf, siz); | |
1924 | ||
1925 | if ((len -= siz) == 0) | |
1926 | break; | |
1927 | buf += siz; | |
1928 | } | |
1929 | } | |
1930 | ||
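/* Reading note (sketch, based on the loop above): in multiplexed mode raw data
 * is only consumed up to raw_input_ends_before, the end of the current
 * MSG_DATA payload.  Once a payload is exhausted, read_a_msg() is called to
 * parse the next header (and any interleaved control messages) before more
 * data bytes are handed back to the caller. */
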
1931 | void read_sbuf(int f, char *buf, size_t len) | |
1932 | { | |
1933 | read_buf(f, buf, len); | |
1934 | buf[len] = '\0'; | |
1935 | } | |
1936 | ||
1937 | uchar read_byte(int f) | |
1938 | { | |
1939 | uchar c; | |
1940 | read_buf(f, (char*)&c, 1); | |
1941 | return c; | |
1942 | } | |
1943 | ||
1944 | int read_vstring(int f, char *buf, int bufsize) | |
1945 | { | |
1946 | int len = read_byte(f); | |
1947 | ||
1948 | if (len & 0x80) | |
1949 | len = (len & ~0x80) * 0x100 + read_byte(f); | |
1950 | ||
1951 | if (len >= bufsize) { | |
1952 | rprintf(FERROR, "over-long vstring received (%d > %d)\n", | |
1953 | len, bufsize - 1); | |
1954 | return -1; | |
1955 | } | |
1956 | ||
1957 | if (len) | |
1958 | read_buf(f, buf, len); | |
1959 | buf[len] = '\0'; | |
1960 | return len; | |
1961 | } | |
1962 | ||
1963 | /* Populate a sum_struct with values from the socket. This is | |
1964 | * called by both the sender and the receiver. */ | |
1965 | void read_sum_head(int f, struct sum_struct *sum) | |
1966 | { | |
1967 | int32 max_blength = protocol_version < 30 ? OLD_MAX_BLOCK_SIZE : MAX_BLOCK_SIZE; | |
1968 | sum->count = read_int(f); | |
1969 | if (sum->count < 0) { | |
1970 | rprintf(FERROR, "Invalid checksum count %ld [%s]\n", | |
1971 | (long)sum->count, who_am_i()); | |
1972 | exit_cleanup(RERR_PROTOCOL); | |
1973 | } | |
1974 | sum->blength = read_int(f); | |
1975 | if (sum->blength < 0 || sum->blength > max_blength) { | |
1976 | rprintf(FERROR, "Invalid block length %ld [%s]\n", | |
1977 | (long)sum->blength, who_am_i()); | |
1978 | exit_cleanup(RERR_PROTOCOL); | |
1979 | } | |
1980 | sum->s2length = protocol_version < 27 ? csum_length : (int)read_int(f); | |
1981 | if (sum->s2length < 0 || sum->s2length > xfer_sum_len) { | |
1982 | rprintf(FERROR, "Invalid checksum length %d [%s]\n", | |
1983 | sum->s2length, who_am_i()); | |
1984 | exit_cleanup(RERR_PROTOCOL); | |
1985 | } | |
1986 | sum->remainder = read_int(f); | |
1987 | if (sum->remainder < 0 || sum->remainder > sum->blength) { | |
1988 | rprintf(FERROR, "Invalid remainder length %ld [%s]\n", | |
1989 | (long)sum->remainder, who_am_i()); | |
1990 | exit_cleanup(RERR_PROTOCOL); | |
1991 | } | |
1992 | } | |
1993 | ||
1994 | /* Send the values from a sum_struct over the socket. Set sum to | |
1995 | * NULL if there are no checksums to send. This is called by both | |
1996 | * the generator and the sender. */ | |
1997 | void write_sum_head(int f, struct sum_struct *sum) | |
1998 | { | |
1999 | static struct sum_struct null_sum; | |
2000 | ||
2001 | if (sum == NULL) | |
2002 | sum = &null_sum; | |
2003 | ||
2004 | write_int(f, sum->count); | |
2005 | write_int(f, sum->blength); | |
2006 | if (protocol_version >= 27) | |
2007 | write_int(f, sum->s2length); | |
2008 | write_int(f, sum->remainder); | |
2009 | } | |
2010 | ||
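/* Wire-format note (sketch): a sum header is just consecutive little-endian
 * 32-bit ints in the order count, blength, s2length, remainder -- 16 bytes on
 * modern protocols, or 12 bytes before protocol 27, which omits s2length. */
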
2011 | /* Sleep after writing to limit I/O bandwidth usage. | |
2012 | * | |
2013 | * @todo Rather than sleeping after each write, it might be better to | |
2014 | * use some kind of averaging. The current algorithm seems to always | |
2015 | * use a bit less bandwidth than specified, because it doesn't make up | |
2016 | * for slow periods. But arguably this is a feature. In addition, we | |
2017 | * ought to take the time used to write the data into account. | |
2018 | * | |
2019 | * During some phases of big transfers (file FOO is uptodate) this is | |
2020 | * called with a small bytes_written every time. As the kernel has to | |
2021 | * round small waits up to guarantee that we actually wait at least the | |
2022 | * requested number of microseconds, this can become grossly inaccurate. | |
2023 | * We therefore keep track of the bytes we've written over time and only | |
2024 | * sleep when the accumulated delay is at least 1 tenth of a second. */ | |
2025 | static void sleep_for_bwlimit(int bytes_written) | |
2026 | { | |
2027 | static struct timeval prior_tv; | |
2028 | static long total_written = 0; | |
2029 | struct timeval tv, start_tv; | |
2030 | long elapsed_usec, sleep_usec; | |
2031 | ||
2032 | #define ONE_SEC 1000000L /* # of microseconds in a second */ | |
2033 | ||
2034 | total_written += bytes_written; | |
2035 | ||
2036 | gettimeofday(&start_tv, NULL); | |
2037 | if (prior_tv.tv_sec) { | |
2038 | elapsed_usec = (start_tv.tv_sec - prior_tv.tv_sec) * ONE_SEC | |
2039 | + (start_tv.tv_usec - prior_tv.tv_usec); | |
2040 | total_written -= (int64)elapsed_usec * bwlimit / (ONE_SEC/1024); | |
2041 | if (total_written < 0) | |
2042 | total_written = 0; | |
2043 | } | |
2044 | ||
2045 | sleep_usec = total_written * (ONE_SEC/1024) / bwlimit; | |
2046 | if (sleep_usec < ONE_SEC / 10) { | |
2047 | prior_tv = start_tv; | |
2048 | return; | |
2049 | } | |
2050 | ||
2051 | tv.tv_sec = sleep_usec / ONE_SEC; | |
2052 | tv.tv_usec = sleep_usec % ONE_SEC; | |
2053 | select(0, NULL, NULL, NULL, &tv); | |
2054 | ||
2055 | gettimeofday(&prior_tv, NULL); | |
2056 | elapsed_usec = (prior_tv.tv_sec - start_tv.tv_sec) * ONE_SEC | |
2057 | + (prior_tv.tv_usec - start_tv.tv_usec); | |
2058 | total_written = (sleep_usec - elapsed_usec) * bwlimit / (ONE_SEC/1024); | |
2059 | } | |
2060 | ||
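/* Worked example (sketch): with bwlimit=1000 (KiB/s), writing 512000 bytes
 * accumulates sleep_usec = 512000 * (ONE_SEC/1024) / 1000, i.e. roughly
 * 500000 microseconds -- about the half second that 500 KiB should take at
 * 1000 KiB/s.  Sleeps shorter than a tenth of a second are skipped and the
 * byte count is carried forward, so many tiny writes coalesce into one
 * accurate sleep. */
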
2061 | void io_flush(int flush_type) | |
2062 | { | |
2063 | if (iobuf.out.len > iobuf.out_empty_len) { | |
2064 | if (flush_type == FULL_FLUSH) /* flush everything in the output buffers */ | |
2065 | perform_io(iobuf.out.size - iobuf.out_empty_len, PIO_NEED_OUTROOM); | |
2066 | else if (flush_type == NORMAL_FLUSH) /* flush at least 1 byte */ | |
2067 | perform_io(iobuf.out.size - iobuf.out.len + 1, PIO_NEED_OUTROOM); | |
2068 | /* MSG_FLUSH: flush iobuf.msg only */ | |
2069 | } | |
2070 | if (iobuf.msg.len) | |
2071 | perform_io(iobuf.msg.size, PIO_NEED_MSGROOM); | |
2072 | } | |
2073 | ||
2074 | void write_shortint(int f, unsigned short x) | |
2075 | { | |
2076 | char b[2]; | |
2077 | b[0] = (char)x; | |
2078 | b[1] = (char)(x >> 8); | |
2079 | write_buf(f, b, 2); | |
2080 | } | |
2081 | ||
2082 | void write_int(int f, int32 x) | |
2083 | { | |
2084 | char b[4]; | |
2085 | SIVAL(b, 0, x); | |
2086 | write_buf(f, b, 4); | |
2087 | } | |
2088 | ||
2089 | void write_varint(int f, int32 x) | |
2090 | { | |
2091 | char b[5]; | |
2092 | uchar bit; | |
2093 | int cnt; | |
2094 | ||
2095 | SIVAL(b, 1, x); | |
2096 | ||
2097 | for (cnt = 4; cnt > 1 && b[cnt] == 0; cnt--) {} | |
2098 | bit = ((uchar)1<<(7-cnt+1)); | |
2099 | ||
2100 | if (CVAL(b, cnt) >= bit) { | |
2101 | cnt++; | |
2102 | *b = ~(bit-1); | |
2103 | } else if (cnt > 1) | |
2104 | *b = b[cnt] | ~(bit*2-1); | |
2105 | else | |
2106 | *b = b[1]; | |
2107 | ||
2108 | write_buf(f, b, cnt); | |
2109 | } | |
2110 | ||
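/* Worked example (sketch, assuming the usual int_byte_extra[] table in
 * rsync.h): write_varint(f, 0x1234) finds the highest non-zero byte (cnt == 2),
 * folds the top byte into the lead byte along with the single "one extra byte
 * follows" bit, and emits 0x92 0x34.  read_varint() reverses this: lead byte
 * 0x92 announces one extra byte, 0x34 becomes the low byte, and 0x92 & 0x7F
 * (== 0x12) becomes the high byte, giving back 0x1234. */
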
2111 | void write_varlong(int f, int64 x, uchar min_bytes) | |
2112 | { | |
2113 | char b[9]; | |
2114 | uchar bit; | |
2115 | int cnt = 8; | |
2116 | ||
2117 | #if SIZEOF_INT64 >= 8 | |
2118 | SIVAL64(b, 1, x); | |
2119 | #else | |
2120 | SIVAL(b, 1, x); | |
2121 | if (x <= 0x7FFFFFFF && x >= 0) | |
2122 | memset(b + 5, 0, 4); | |
2123 | else { | |
2124 | rprintf(FERROR, "Integer overflow: attempted 64-bit offset\n"); | |
2125 | exit_cleanup(RERR_UNSUPPORTED); | |
2126 | } | |
2127 | #endif | |
2128 | ||
2129 | while (cnt > min_bytes && b[cnt] == 0) | |
2130 | cnt--; | |
2131 | bit = ((uchar)1<<(7-cnt+min_bytes)); | |
2132 | if (CVAL(b, cnt) >= bit) { | |
2133 | cnt++; | |
2134 | *b = ~(bit-1); | |
2135 | } else if (cnt > min_bytes) | |
2136 | *b = b[cnt] | ~(bit*2-1); | |
2137 | else | |
2138 | *b = b[cnt]; | |
2139 | ||
2140 | write_buf(f, b, cnt); | |
2141 | } | |
2142 | ||
2143 | /* | |
2144 | * Note: int64 may actually be a 32-bit type if ./configure couldn't find any | |
2145 | * 64-bit types on this platform. | |
2146 | */ | |
2147 | void write_longint(int f, int64 x) | |
2148 | { | |
2149 | char b[12], * const s = b+4; | |
2150 | ||
2151 | SIVAL(s, 0, x); | |
2152 | if (x <= 0x7FFFFFFF && x >= 0) { | |
2153 | write_buf(f, s, 4); | |
2154 | return; | |
2155 | } | |
2156 | ||
2157 | #if SIZEOF_INT64 < 8 | |
2158 | rprintf(FERROR, "Integer overflow: attempted 64-bit offset\n"); | |
2159 | exit_cleanup(RERR_UNSUPPORTED); | |
2160 | #else | |
2161 | memset(b, 0xFF, 4); | |
2162 | SIVAL(s, 4, x >> 32); | |
2163 | write_buf(f, b, 12); | |
2164 | #endif | |
2165 | } | |
2166 | ||
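/* Worked example (sketch): write_longint(f, 5) sends just the 4 bytes
 * 05 00 00 00, while a value that needs more than 31 bits, e.g. 0x123456789,
 * is escaped as FF FF FF FF followed by the full 8-byte little-endian value
 * 89 67 45 23 01 00 00 00.  read_longint() treats a leading 0xffffffff word
 * as exactly that escape. */
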
2167 | void write_bigbuf(int f, const char *buf, size_t len) | |
2168 | { | |
2169 | size_t half_max = (iobuf.out.size - iobuf.out_empty_len) / 2; | |
2170 | ||
2171 | while (len > half_max + 1024) { | |
2172 | write_buf(f, buf, half_max); | |
2173 | buf += half_max; | |
2174 | len -= half_max; | |
2175 | } | |
2176 | ||
2177 | write_buf(f, buf, len); | |
2178 | } | |
2179 | ||
2180 | void write_buf(int f, const char *buf, size_t len) | |
2181 | { | |
2182 | size_t pos, siz; | |
2183 | ||
2184 | if (f != iobuf.out_fd) { | |
2185 | safe_write(f, buf, len); | |
2186 | goto batch_copy; | |
2187 | } | |
2188 | ||
2189 | if (iobuf.out.len + len > iobuf.out.size) | |
2190 | perform_io(len, PIO_NEED_OUTROOM); | |
2191 | ||
2192 | pos = iobuf.out.pos + iobuf.out.len; /* Must be set after any flushing. */ | |
2193 | if (pos >= iobuf.out.size) | |
2194 | pos -= iobuf.out.size; | |
2195 | ||
2196 | /* Handle a split copy if we wrap around the end of the circular buffer. */ | |
2197 | if (pos >= iobuf.out.pos && (siz = iobuf.out.size - pos) < len) { | |
2198 | memcpy(iobuf.out.buf + pos, buf, siz); | |
2199 | memcpy(iobuf.out.buf, buf + siz, len - siz); | |
2200 | } else | |
2201 | memcpy(iobuf.out.buf + pos, buf, len); | |
2202 | ||
2203 | iobuf.out.len += len; | |
2204 | total_data_written += len; | |
2205 | ||
2206 | batch_copy: | |
2207 | if (f == write_batch_monitor_out) | |
2208 | safe_write(batch_fd, buf, len); | |
2209 | } | |
2210 | ||
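/* Worked example (sketch) of the wrap-around copy in write_buf() above: with a
 * 16-byte circular buffer whose pending data starts at out.pos == 10 with
 * out.len == 2, an 8-byte write lands at pos == 12; only 4 bytes fit before
 * the end of the buffer, so the first memcpy() takes bytes 0-3 of the input
 * and the second memcpy() puts bytes 4-7 at the start of the buffer. */
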
2211 | /* Write a string to the connection */ | |
2212 | void write_sbuf(int f, const char *buf) | |
2213 | { | |
2214 | write_buf(f, buf, strlen(buf)); | |
2215 | } | |
2216 | ||
2217 | void write_byte(int f, uchar c) | |
2218 | { | |
2219 | write_buf(f, (char *)&c, 1); | |
2220 | } | |
2221 | ||
2222 | void write_vstring(int f, const char *str, int len) | |
2223 | { | |
2224 | uchar lenbuf[3], *lb = lenbuf; | |
2225 | ||
2226 | if (len > 0x7F) { | |
2227 | if (len > 0x7FFF) { | |
2228 | rprintf(FERROR, | |
2229 | "attempting to send over-long vstring (%d > %d)\n", | |
2230 | len, 0x7FFF); | |
2231 | exit_cleanup(RERR_PROTOCOL); | |
2232 | } | |
2233 | *lb++ = len / 0x100 + 0x80; | |
2234 | } | |
2235 | *lb = len; | |
2236 | ||
2237 | write_buf(f, (char*)lenbuf, lb - lenbuf + 1); | |
2238 | if (len) | |
2239 | write_buf(f, str, len); | |
2240 | } | |
2241 | ||
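/* Worked example (sketch): write_vstring() uses a one-byte length prefix for
 * strings up to 127 chars and a two-byte prefix otherwise, e.g. a 300-char
 * string is prefixed with 0x81 0x2C (0x80 + 300/256, then 300 % 256), which
 * read_vstring() decodes back as (0x81 & ~0x80) * 0x100 + 0x2C == 300. */
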
2242 | /* Send a file-list index using a byte-reduction method. */ | |
2243 | void write_ndx(int f, int32 ndx) | |
2244 | { | |
2245 | static int32 prev_positive = -1, prev_negative = 1; | |
2246 | int32 diff, cnt = 0; | |
2247 | char b[6]; | |
2248 | ||
2249 | if (protocol_version < 30 || read_batch) { | |
2250 | write_int(f, ndx); | |
2251 | return; | |
2252 | } | |
2253 | ||
2254 | /* Send NDX_DONE as a single-byte 0 with no side effects. Send | |
2255 | * negative nums as a positive after sending a leading 0xFF. */ | |
2256 | if (ndx >= 0) { | |
2257 | diff = ndx - prev_positive; | |
2258 | prev_positive = ndx; | |
2259 | } else if (ndx == NDX_DONE) { | |
2260 | *b = 0; | |
2261 | write_buf(f, b, 1); | |
2262 | return; | |
2263 | } else { | |
2264 | b[cnt++] = (char)0xFF; | |
2265 | ndx = -ndx; | |
2266 | diff = ndx - prev_negative; | |
2267 | prev_negative = ndx; | |
2268 | } | |
2269 | ||
2270 | /* A diff of 1 - 253 is sent as a one-byte diff; a diff of 254 - 32767 | |
2271 | * or 0 is sent as 0xFE + a two-byte diff; otherwise we send 0xFE | |
2272 | * plus all 4 bytes of the (non-negative) num with the high bit set. */ | |
2273 | if (diff < 0xFE && diff > 0) | |
2274 | b[cnt++] = (char)diff; | |
2275 | else if (diff < 0 || diff > 0x7FFF) { | |
2276 | b[cnt++] = (char)0xFE; | |
2277 | b[cnt++] = (char)((ndx >> 24) | 0x80); | |
2278 | b[cnt++] = (char)ndx; | |
2279 | b[cnt++] = (char)(ndx >> 8); | |
2280 | b[cnt++] = (char)(ndx >> 16); | |
2281 | } else { | |
2282 | b[cnt++] = (char)0xFE; | |
2283 | b[cnt++] = (char)(diff >> 8); | |
2284 | b[cnt++] = (char)diff; | |
2285 | } | |
2286 | write_buf(f, b, cnt); | |
2287 | } | |
2288 | ||
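/* Worked example (sketch): starting from the initial prev_positive of -1,
 * write_ndx(f, 0) sends the single diff byte 01, and a following
 * write_ndx(f, 5) sends 05.  A forward jump of 300 entries goes out as the
 * three bytes FE 01 2C (the 0xFE marker plus a 16-bit diff), and NDX_DONE is
 * always the single byte 00. */
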
2289 | /* Receive a file-list index using a byte-reduction method. */ | |
2290 | int32 read_ndx(int f) | |
2291 | { | |
2292 | static int32 prev_positive = -1, prev_negative = 1; | |
2293 | int32 *prev_ptr, num; | |
2294 | char b[4]; | |
2295 | ||
2296 | if (protocol_version < 30) | |
2297 | return read_int(f); | |
2298 | ||
2299 | read_buf(f, b, 1); | |
2300 | if (CVAL(b, 0) == 0xFF) { | |
2301 | read_buf(f, b, 1); | |
2302 | prev_ptr = &prev_negative; | |
2303 | } else if (CVAL(b, 0) == 0) | |
2304 | return NDX_DONE; | |
2305 | else | |
2306 | prev_ptr = &prev_positive; | |
2307 | if (CVAL(b, 0) == 0xFE) { | |
2308 | read_buf(f, b, 2); | |
2309 | if (CVAL(b, 0) & 0x80) { | |
2310 | b[3] = CVAL(b, 0) & ~0x80; | |
2311 | b[0] = b[1]; | |
2312 | read_buf(f, b+1, 2); | |
2313 | num = IVAL(b, 0); | |
2314 | } else | |
2315 | num = (UVAL(b,0)<<8) + UVAL(b,1) + *prev_ptr; | |
2316 | } else | |
2317 | num = UVAL(b, 0) + *prev_ptr; | |
2318 | *prev_ptr = num; | |
2319 | if (prev_ptr == &prev_negative) | |
2320 | num = -num; | |
2321 | return num; | |
2322 | } | |
2323 | ||
2324 | /* Read a line of up to bufsiz-1 characters into buf. Strips | |
2325 | * the (required) trailing newline and all carriage returns. | |
2326 | * Returns 1 for success; 0 for I/O error or truncation. */ | |
2327 | int read_line_old(int fd, char *buf, size_t bufsiz, int eof_ok) | |
2328 | { | |
2329 | assert(fd != iobuf.in_fd); | |
2330 | bufsiz--; /* leave room for the null */ | |
2331 | while (bufsiz > 0) { | |
2332 | if (safe_read(fd, buf, 1) == 0) { | |
2333 | if (eof_ok) | |
2334 | break; | |
2335 | return 0; | |
2336 | } | |
2337 | if (*buf == '\0') | |
2338 | return 0; | |
2339 | if (*buf == '\n') | |
2340 | break; | |
2341 | if (*buf != '\r') { | |
2342 | buf++; | |
2343 | bufsiz--; | |
2344 | } | |
2345 | } | |
2346 | *buf = '\0'; | |
2347 | return bufsiz > 0; | |
2348 | } | |
2349 | ||
2350 | void io_printf(int fd, const char *format, ...) | |
2351 | { | |
2352 | va_list ap; | |
2353 | char buf[BIGPATHBUFLEN]; | |
2354 | int len; | |
2355 | ||
2356 | va_start(ap, format); | |
2357 | len = vsnprintf(buf, sizeof buf, format, ap); | |
2358 | va_end(ap); | |
2359 | ||
2360 | if (len < 0) | |
2361 | exit_cleanup(RERR_PROTOCOL); | |
2362 | ||
2363 | if (len >= (int)sizeof buf) { | |
2364 | rprintf(FERROR, "io_printf() was too long for the buffer.\n"); | |
2365 | exit_cleanup(RERR_PROTOCOL); | |
2366 | } | |
2367 | ||
2368 | write_sbuf(fd, buf); | |
2369 | } | |
2370 | ||
2371 | /* Setup for multiplexing a MSG_* stream with the data stream. */ | |
2372 | void io_start_multiplex_out(int fd) | |
2373 | { | |
2374 | io_flush(FULL_FLUSH); | |
2375 | ||
2376 | if (msgs2stderr == 1 && DEBUG_GTE(IO, 2)) | |
2377 | rprintf(FINFO, "[%s] io_start_multiplex_out(%d)\n", who_am_i(), fd); | |
2378 | ||
2379 | if (!iobuf.msg.buf) | |
2380 | alloc_xbuf(&iobuf.msg, ROUND_UP_1024(IO_BUFFER_SIZE)); | |
2381 | ||
2382 | iobuf.out_empty_len = 4; /* See also OUT_MULTIPLEXED */ | |
2383 | io_start_buffering_out(fd); | |
2384 | got_kill_signal = 0; | |
2385 | ||
2386 | iobuf.raw_data_header_pos = iobuf.out.pos + iobuf.out.len; | |
2387 | iobuf.out.len += 4; | |
2388 | } | |
2389 | ||
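/* Note (sketch): from this point on the output buffer always keeps 4 bytes
 * reserved at raw_data_header_pos for the next MSG_DATA header, which is why
 * out_empty_len is 4 and an "empty" buffer still reports out.len == 4. */
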
2390 | /* Setup for multiplexing a MSG_* stream with the data stream. */ | |
2391 | void io_start_multiplex_in(int fd) | |
2392 | { | |
2393 | if (msgs2stderr == 1 && DEBUG_GTE(IO, 2)) | |
2394 | rprintf(FINFO, "[%s] io_start_multiplex_in(%d)\n", who_am_i(), fd); | |
2395 | ||
2396 | iobuf.in_multiplexed = 1; /* See also IN_MULTIPLEXED */ | |
2397 | io_start_buffering_in(fd); | |
2398 | } | |
2399 | ||
2400 | int io_end_multiplex_in(int mode) | |
2401 | { | |
2402 | int ret = iobuf.in_multiplexed ? iobuf.in_fd : -1; | |
2403 | ||
2404 | if (msgs2stderr == 1 && DEBUG_GTE(IO, 2)) | |
2405 | rprintf(FINFO, "[%s] io_end_multiplex_in(mode=%d)\n", who_am_i(), mode); | |
2406 | ||
2407 | iobuf.in_multiplexed = 0; | |
2408 | if (mode == MPLX_SWITCHING) | |
2409 | iobuf.raw_input_ends_before = 0; | |
2410 | else | |
2411 | assert(iobuf.raw_input_ends_before == 0); | |
2412 | if (mode != MPLX_TO_BUFFERED) | |
2413 | io_end_buffering_in(mode); | |
2414 | ||
2415 | return ret; | |
2416 | } | |
2417 | ||
2418 | int io_end_multiplex_out(int mode) | |
2419 | { | |
2420 | int ret = iobuf.out_empty_len ? iobuf.out_fd : -1; | |
2421 | ||
2422 | if (msgs2stderr == 1 && DEBUG_GTE(IO, 2)) | |
2423 | rprintf(FINFO, "[%s] io_end_multiplex_out(mode=%d)\n", who_am_i(), mode); | |
2424 | ||
2425 | if (mode != MPLX_TO_BUFFERED) | |
2426 | io_end_buffering_out(mode); | |
2427 | else | |
2428 | io_flush(FULL_FLUSH); | |
2429 | ||
2430 | iobuf.out.len = 0; | |
2431 | iobuf.out_empty_len = 0; | |
2432 | if (got_kill_signal > 0) /* Just in case... */ | |
2433 | handle_kill_signal(False); | |
2434 | got_kill_signal = -1; | |
2435 | ||
2436 | return ret; | |
2437 | } | |
2438 | ||
2439 | void start_write_batch(int fd) | |
2440 | { | |
2441 | /* Some communication has already taken place, but we don't | |
2442 | * enable batch writing until here so that we can write a | |
2443 | * canonical record of the communication even though the | |
2444 | * actual communication so far depends on whether a daemon | |
2445 | * is involved. */ | |
2446 | write_int(batch_fd, protocol_version); | |
2447 | if (protocol_version >= 30) | |
2448 | write_varint(batch_fd, compat_flags); | |
2449 | write_int(batch_fd, checksum_seed); | |
2450 | ||
2451 | if (am_sender) | |
2452 | write_batch_monitor_out = fd; | |
2453 | else | |
2454 | write_batch_monitor_in = fd; | |
2455 | } | |
2456 | ||
2457 | void stop_write_batch(void) | |
2458 | { | |
2459 | write_batch_monitor_out = -1; | |
2460 | write_batch_monitor_in = -1; | |
2461 | } |