]> git.ipfire.org Git - thirdparty/openvpn.git/blob - src/openvpn/forward.c
Minor fix to process_ip_header
[thirdparty/openvpn.git] / src / openvpn / forward.c
1 /*
2 * OpenVPN -- An application to securely tunnel IP networks
3 * over a single TCP/UDP port, with support for SSL/TLS-based
4 * session authentication and key exchange,
5 * packet encryption, packet authentication, and
6 * packet compression.
7 *
8 * Copyright (C) 2002-2023 OpenVPN Inc <sales@openvpn.net>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2
12 * as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
22 */
23
24 #ifdef HAVE_CONFIG_H
25 #include "config.h"
26 #endif
27
28 #include "syshead.h"
29
30 #include "forward.h"
31 #include "init.h"
32 #include "push.h"
33 #include "gremlin.h"
34 #include "mss.h"
35 #include "event.h"
36 #include "occ.h"
37 #include "ping.h"
38 #include "ps.h"
39 #include "dhcp.h"
40 #include "common.h"
41 #include "ssl_verify.h"
42 #include "dco.h"
43 #include "auth_token.h"
44
45 #include "memdbg.h"
46
47 #include "mstats.h"
48
49 counter_type link_read_bytes_global; /* GLOBAL */
50 counter_type link_write_bytes_global; /* GLOBAL */
51
52 /* show event wait debugging info */
53
54 #ifdef ENABLE_DEBUG
55
56 static const char *
57 wait_status_string(struct context *c, struct gc_arena *gc)
58 {
59 struct buffer out = alloc_buf_gc(64, gc);
60 buf_printf(&out, "I/O WAIT %s|%s|%s|%s %s",
61 tun_stat(c->c1.tuntap, EVENT_READ, gc),
62 tun_stat(c->c1.tuntap, EVENT_WRITE, gc),
63 socket_stat(c->c2.link_socket, EVENT_READ, gc),
64 socket_stat(c->c2.link_socket, EVENT_WRITE, gc),
65 tv_string(&c->c2.timeval, gc));
66 return BSTR(&out);
67 }
68
69 static void
70 show_wait_status(struct context *c)
71 {
72 struct gc_arena gc = gc_new();
73 dmsg(D_EVENT_WAIT, "%s", wait_status_string(c, &gc));
74 gc_free(&gc);
75 }
76
77 #endif /* ifdef ENABLE_DEBUG */
78
/* Handle a TLS soft error on a connection-oriented (e.g. TCP) transport:
 * log it and raise the configured tls-exit signal to restart the session. */
static void
check_tls_errors_co(struct context *c)
{
    msg(D_STREAM_ERRORS, "Fatal TLS error (check_tls_errors_co), restarting");
    register_signal(c->sig, c->c2.tls_exit_signal, "tls-error"); /* SOFT-SIGUSR1 -- TLS error */
}
85
/* Handle a TLS hard error on a non-connection-oriented (e.g. UDP)
 * transport: raise the configured tls-exit signal without extra logging. */
static void
check_tls_errors_nco(struct context *c)
{
    register_signal(c->sig, c->c2.tls_exit_signal, "tls-error"); /* SOFT-SIGUSR1 -- TLS error */
}
91
92 /*
93 * TLS errors are fatal in TCP mode.
94 * Also check for --tls-exit trigger.
95 */
96 static inline void
97 check_tls_errors(struct context *c)
98 {
99 if (c->c2.tls_multi && c->c2.tls_exit_signal)
100 {
101 if (link_socket_connection_oriented(c->c2.link_socket))
102 {
103 if (c->c2.tls_multi->n_soft_errors)
104 {
105 check_tls_errors_co(c);
106 }
107 }
108 else
109 {
110 if (c->c2.tls_multi->n_hard_errors)
111 {
112 check_tls_errors_nco(c);
113 }
114 }
115 }
116 }
117
118 /*
119 * Set our wakeup to 0 seconds, so we will be rescheduled
120 * immediately.
121 */
static inline void
context_immediate_reschedule(struct context *c)
{
    /* a zero timeval makes the next event wait return immediately */
    c->c2.timeval.tv_sec = 0; /* ZERO-TIMEOUT */
    c->c2.timeval.tv_usec = 0;
}
128
129 static inline void
130 context_reschedule_sec(struct context *c, int sec)
131 {
132 if (sec < 0)
133 {
134 sec = 0;
135 }
136 if (sec < c->c2.timeval.tv_sec)
137 {
138 c->c2.timeval.tv_sec = sec;
139 c->c2.timeval.tv_usec = 0;
140 }
141 }
142
143 void
144 check_dco_key_status(struct context *c)
145 {
146 /* DCO context is not yet initialised or enabled */
147 if (!dco_enabled(&c->options))
148 {
149 return;
150 }
151
152 /* no active peer (p2p tls-server mode) */
153 if (c->c2.tls_multi->dco_peer_id == -1)
154 {
155 return;
156 }
157
158 if (!dco_update_keys(&c->c1.tuntap->dco, c->c2.tls_multi))
159 {
160 /* Something bad happened. Kill the connection to
161 * be able to recover. */
162 register_signal(c->sig, SIGUSR1, "dco update keys error");
163 }
164 }
165
166 /*
167 * In TLS mode, let TLS level respond to any control-channel
168 * packets which were received, or prepare any packets for
169 * transmission.
170 *
171 * tmp_int is purely an optimization that allows us to call
172 * tls_multi_process less frequently when there's not much
173 * traffic on the control-channel.
174 *
175 */
static void
check_tls(struct context *c)
{
    /* upper bound on how long the TLS layer wants to sleep */
    interval_t wakeup = BIG_TIMEOUT;

    if (interval_test(&c->c2.tmp_int))
    {
        /* run one pass of the TLS state machine; may queue outgoing
         * control data in c->c2.to_link */
        const int tmp_status = tls_multi_process
                                   (c->c2.tls_multi, &c->c2.to_link, &c->c2.to_link_addr,
                                   get_link_socket_info(c), &wakeup);

        if (tmp_status == TLSMP_RECONNECT)
        {
            event_timeout_init(&c->c2.wait_for_connect, 1, now);
            reset_coarse_timers(c);
        }

        if (tmp_status == TLSMP_ACTIVE || tmp_status == TLSMP_RECONNECT)
        {
            update_time();
            interval_action(&c->c2.tmp_int);
        }
        else if (tmp_status == TLSMP_KILL)
        {
            if (c->options.mode == MODE_SERVER)
            {
                send_auth_failed(c, c->c2.tls_multi->client_reason);
            }
            else
            {
                register_signal(c->sig, SIGTERM, "auth-control-exit");
            }
        }

        interval_future_trigger(&c->c2.tmp_int, wakeup);
    }

    interval_schedule_wakeup(&c->c2.tmp_int, &wakeup);

    /*
     * Our current code has no good hooks in the TLS machinery to update
     * DCO keys. So we check the key status after the whole TLS machinery
     * has been completed and potentially update them
     *
     * We have a hidden state transition from secondary to primary key based
     * on ks->auth_deferred_expire that DCO needs to check that the normal
     * TLS state engine does not check. So we call the \c check_dco_key_status
     * function even if tmp_status does not indicate that something has changed.
     */
    check_dco_key_status(c);

    if (wakeup)
    {
        context_reschedule_sec(c, wakeup);
    }
}
232
233 /*
234 * Handle incoming configuration
235 * messages on the control channel.
236 */
237 static void
238 check_incoming_control_channel(struct context *c)
239 {
240 int len = tls_test_payload_len(c->c2.tls_multi);
241 /* We should only be called with len >0 */
242 ASSERT(len > 0);
243
244 struct gc_arena gc = gc_new();
245 struct buffer buf = alloc_buf_gc(len, &gc);
246 if (tls_rec_payload(c->c2.tls_multi, &buf))
247 {
248 /* force null termination of message */
249 buf_null_terminate(&buf);
250
251 /* enforce character class restrictions */
252 string_mod(BSTR(&buf), CC_PRINT, CC_CRLF, 0);
253
254 if (buf_string_match_head_str(&buf, "AUTH_FAILED"))
255 {
256 receive_auth_failed(c, &buf);
257 }
258 else if (buf_string_match_head_str(&buf, "PUSH_"))
259 {
260 incoming_push_message(c, &buf);
261 }
262 else if (buf_string_match_head_str(&buf, "RESTART"))
263 {
264 server_pushed_signal(c, &buf, true, 7);
265 }
266 else if (buf_string_match_head_str(&buf, "HALT"))
267 {
268 server_pushed_signal(c, &buf, false, 4);
269 }
270 else if (buf_string_match_head_str(&buf, "INFO_PRE"))
271 {
272 server_pushed_info(c, &buf, 8);
273 }
274 else if (buf_string_match_head_str(&buf, "INFO"))
275 {
276 server_pushed_info(c, &buf, 4);
277 }
278 else if (buf_string_match_head_str(&buf, "CR_RESPONSE"))
279 {
280 receive_cr_response(c, &buf);
281 }
282 else if (buf_string_match_head_str(&buf, "AUTH_PENDING"))
283 {
284 receive_auth_pending(c, &buf);
285 }
286 else if (buf_string_match_head_str(&buf, "EXIT"))
287 {
288 receive_exit_message(c);
289 }
290 else
291 {
292 msg(D_PUSH_ERRORS, "WARNING: Received unknown control message: %s", BSTR(&buf));
293 }
294 }
295 else
296 {
297 msg(D_PUSH_ERRORS, "WARNING: Receive control message failed");
298 }
299
300 gc_free(&gc);
301 }
302
303 /*
304 * Periodically resend PUSH_REQUEST until PUSH message received
305 */
static void
check_push_request(struct context *c)
{
    /* (re)send a PUSH_REQUEST to the server */
    send_push_request(c);

    /* if no response to first push_request, retry at PUSH_REQUEST_INTERVAL second intervals */
    event_timeout_modify_wakeup(&c->c2.push_request_interval, PUSH_REQUEST_INTERVAL);
}
314
315 /*
316 * Things that need to happen immediately after connection initiation should go here.
317 *
318 * Options like --up-delay need to be triggered by this function which
319 * checks for connection establishment.
320 *
321 * Note: The process_incoming_push_reply currently assumes that this function
322 * only sets up the pull request timer when pull is enabled.
323 */
static void
check_connection_established(struct context *c)
{
    if (connection_established(c))
    {
        /* if --pull was specified, send a push request to server */
        if (c->c2.tls_multi && c->options.pull)
        {
#ifdef ENABLE_MANAGEMENT
            /* tell the management interface we are now fetching config */
            if (management)
            {
                management_set_state(management,
                                     OPENVPN_STATE_GET_CONFIG,
                                     NULL,
                                     NULL,
                                     NULL,
                                     NULL,
                                     NULL);
            }
#endif
            /* fire up push request right away (already 1s delayed) */
            /* We might receive a AUTH_PENDING request before we armed this
             * timer. In that case we don't change the value */
            if (c->c2.push_request_timeout < now)
            {
                c->c2.push_request_timeout = now + c->options.handshake_window;
            }
            event_timeout_init(&c->c2.push_request_interval, 0, now);
            reset_coarse_timers(c);
        }
        else
        {
            /* no pull: bring the tunnel up directly */
            if (!do_up(c, false, 0))
            {
                register_signal(c->sig, SIGUSR1, "connection initialisation failed");
            }
        }

        /* connection is up - stop polling for establishment */
        event_timeout_clear(&c->c2.wait_for_connect);
    }
}
365
366 bool
367 send_control_channel_string_dowork(struct tls_session *session,
368 const char *str, int msglevel)
369 {
370 struct gc_arena gc = gc_new();
371 bool stat;
372
373 ASSERT(session);
374 struct key_state *ks = &session->key[KS_PRIMARY];
375
376 /* buffered cleartext write onto TLS control channel */
377 stat = tls_send_payload(ks, (uint8_t *) str, strlen(str) + 1);
378
379 msg(msglevel, "SENT CONTROL [%s]: '%s' (status=%d)",
380 session->common_name ? session->common_name : "UNDEF",
381 sanitize_control_message(str, &gc),
382 (int) stat);
383
384 gc_free(&gc);
385 return stat;
386 }
387
/* Trigger the TLS state machine interval and wake the event loop
 * immediately so freshly queued control-channel data gets processed. */
void
reschedule_multi_process(struct context *c)
{
    interval_action(&c->c2.tmp_int);
    context_immediate_reschedule(c); /* ZERO-TIMEOUT */
}
394
395 bool
396 send_control_channel_string(struct context *c, const char *str, int msglevel)
397 {
398 if (c->c2.tls_multi)
399 {
400 struct tls_session *session = &c->c2.tls_multi->session[TM_ACTIVE];
401 bool ret = send_control_channel_string_dowork(session, str, msglevel);
402 reschedule_multi_process(c);
403
404 return ret;
405 }
406 return true;
407 }
408 /*
409 * Add routes.
410 */
411
412 static void
413 check_add_routes_action(struct context *c, const bool errors)
414 {
415 bool route_status = do_route(&c->options, c->c1.route_list, c->c1.route_ipv6_list,
416 c->c1.tuntap, c->plugins, c->c2.es, &c->net_ctx);
417
418 int flags = (errors ? ISC_ERRORS : 0);
419 flags |= (!route_status ? ISC_ROUTE_ERRORS : 0);
420
421 update_time();
422 event_timeout_clear(&c->c2.route_wakeup);
423 event_timeout_clear(&c->c2.route_wakeup_expire);
424 initialization_sequence_completed(c, flags); /* client/p2p --route-delay was defined */
425 }
426
static void
check_add_routes(struct context *c)
{
    /* interface is usable: add routes now */
    if (test_routes(c->c1.route_list, c->c1.tuntap))
    {
        check_add_routes_action(c, false);
    }
    /* waited too long for the interface: add routes anyway, flag errors */
    else if (event_timeout_trigger(&c->c2.route_wakeup_expire, &c->c2.timeval, ETT_DEFAULT))
    {
        check_add_routes_action(c, true);
    }
    else
    {
        msg(D_ROUTE, "Route: Waiting for TUN/TAP interface to come up...");
        if (c->c1.tuntap)
        {
            /* tun_standby gives up after repeated failures; restart then */
            if (!tun_standby(c->c1.tuntap))
            {
                register_signal(c->sig, SIGHUP, "ip-fail");
                c->persist.restart_sleep_seconds = 10;
#ifdef _WIN32
                show_routes(M_INFO|M_NOPREFIX);
                show_adapters(M_INFO|M_NOPREFIX);
#endif
            }
        }
        update_time();
        /* poll again in 1 second */
        if (c->c2.route_wakeup.n != 1)
        {
            event_timeout_init(&c->c2.route_wakeup, 1, now);
        }
        event_timeout_reset(&c->c2.ping_rec_interval);
    }
}
461
462 /*
463 * Should we exit due to inactivity timeout?
464 *
465 * In the non-dco case, the timeout is reset via register_activity()
466 * whenever there is sufficient activity on tun or link, so this function
467 * is only ever called to raise the TERM signal.
468 *
469 * With DCO, OpenVPN does not see incoming or outgoing data packets anymore
470 * and the logic needs to change - we permit the event to trigger and check
471 * kernel DCO counters here, returning and rearming the timer if there was
472 * sufficient traffic.
473 */
static void
check_inactivity_timeout(struct context *c)
{
    /* with DCO, fetch kernel-side counters and rearm if there was
     * sufficient traffic since the last check */
    if (dco_enabled(&c->options) && dco_get_peer_stats(c) == 0)
    {
        int64_t tot_bytes = c->c2.tun_read_bytes + c->c2.tun_write_bytes;
        int64_t new_bytes = tot_bytes - c->c2.inactivity_bytes;

        if (new_bytes > c->options.inactivity_minimum_bytes)
        {
            /* enough activity: remember the watermark and rearm */
            c->c2.inactivity_bytes = tot_bytes;
            event_timeout_reset(&c->c2.inactivity_interval);

            return;
        }
    }

    msg(M_INFO, "Inactivity timeout (--inactive), exiting");
    register_signal(c->sig, SIGTERM, "inactive");
}
494
int
get_server_poll_remaining_time(struct event_timeout *server_poll_timeout)
{
    /* Seconds until the server-poll timeout fires, clamped at zero. */
    update_time();
    const int remaining = event_timeout_remaining(server_poll_timeout);
    return (remaining > 0) ? remaining : 0;
}
502
503 static void
504 check_server_poll_timeout(struct context *c)
505 {
506 event_timeout_reset(&c->c2.server_poll_interval);
507 ASSERT(c->c2.tls_multi);
508 if (!tls_initial_packet_received(c->c2.tls_multi))
509 {
510 msg(M_INFO, "Server poll timeout, restarting");
511 register_signal(c->sig, SIGUSR1, "server_poll");
512 c->persist.restart_sleep_seconds = -1;
513 }
514 }
515
516 /*
517 * Schedule a signal n_seconds from now.
518 */
/* Arrange for `signal` to be raised n_seconds from now; the actual raise
 * happens in check_scheduled_exit() when the scheduled_exit timeout fires. */
void
schedule_exit(struct context *c, const int n_seconds, const int signal)
{
    /* stop negotiating new sessions while we wind down */
    tls_set_single_session(c->c2.tls_multi);
    update_time();
    reset_coarse_timers(c);
    event_timeout_init(&c->c2.scheduled_exit, n_seconds, now);
    c->c2.scheduled_exit_signal = signal;
    msg(D_SCHED_EXIT, "Delayed exit in %d seconds", n_seconds);
}
529
530 /*
531 * Scheduled exit?
532 */
/* Scheduled-exit timeout fired: raise the signal recorded by schedule_exit(). */
static void
check_scheduled_exit(struct context *c)
{
    register_signal(c->sig, c->c2.scheduled_exit_signal, "delayed-exit");
}
538
539 /*
540 * Should we write timer-triggered status file.
541 */
542 static void
543 check_status_file(struct context *c)
544 {
545 if (c->c1.status_output)
546 {
547 print_status(c, c->c1.status_output);
548 }
549 }
550
551 #ifdef ENABLE_FRAGMENT
552 /*
553 * Should we deliver a datagram fragment to remote?
554 */
static void
check_fragment(struct context *c)
{
    struct link_socket_info *lsi = get_link_socket_info(c);

    /* OS MTU Hint? */
    if (lsi->mtu_changed && lsi->lsa)
    {
        frame_adjust_path_mtu(c);
        lsi->mtu_changed = false;
    }

    /* only emit a queued fragment when the link output buffer is free */
    if (fragment_outgoing_defined(c->c2.fragment))
    {
        if (!c->c2.to_link.len)
        {
            /* encrypt a fragment for output to TCP/UDP port */
            ASSERT(fragment_ready_to_send(c->c2.fragment, &c->c2.buf, &c->c2.frame_fragment));
            encrypt_sign(c, false);
        }
    }

    /* let the fragmenter run its timers (retransmit etc.) */
    fragment_housekeeping(c->c2.fragment, &c->c2.frame_fragment, &c->c2.timeval);
}
579 #endif /* ifdef ENABLE_FRAGMENT */
580
581 /*
582 * Buffer reallocation, for use with null encryption.
583 */
static inline void
buffer_turnover(const uint8_t *orig_buf, struct buffer *dest_stub, struct buffer *src_stub, struct buffer *storage)
{
    /* If processing left the data in the original buffer memory (null
     * encryption) and it is not already in `storage`, copy it there so
     * `dest_stub` does not alias a buffer that will be reused; otherwise
     * hand the processed buffer through unchanged. */
    if (orig_buf == src_stub->data && src_stub->data != storage->data)
    {
        buf_assign(storage, src_stub);
        *dest_stub = *storage;
    }
    else
    {
        *dest_stub = *src_stub;
    }
}
597
598 /*
599 * Compress, fragment, encrypt and HMAC-sign an outgoing packet.
600 * Input: c->c2.buf
601 * Output: c->c2.to_link
602 */
void
encrypt_sign(struct context *c, bool comp_frag)
{
    struct context_buffers *b = c->c2.buffers;
    const uint8_t *orig_buf = c->c2.buf.data;
    struct crypto_options *co = NULL;

    /* with DCO the kernel handles data packets; drop anything that
     * reaches the userspace data path */
    if (dco_enabled(&c->options))
    {
        msg(M_WARN, "Attempting to send data packet while data channel offload is in use. "
            "Dropping packet");
        c->c2.buf.len = 0;
    }

    /*
     * Drop non-TLS outgoing packet if client-connect script/plugin
     * has not yet succeeded. In non-TLS tls_multi mode is not defined
     * and we always pass packets.
     */
    if (c->c2.tls_multi && c->c2.tls_multi->multi_state < CAS_CONNECT_DONE)
    {
        c->c2.buf.len = 0;
    }

    if (comp_frag)
    {
#ifdef USE_COMP
        /* Compress the packet. */
        if (c->c2.comp_context)
        {
            (*c->c2.comp_context->alg.compress)(&c->c2.buf, b->compress_buf, c->c2.comp_context, &c->c2.frame);
        }
#endif
#ifdef ENABLE_FRAGMENT
        if (c->c2.fragment)
        {
            fragment_outgoing(c->c2.fragment, &c->c2.buf, &c->c2.frame_fragment);
        }
#endif
    }

    /* initialize work buffer with buf.headroom bytes of prepend capacity */
    ASSERT(buf_init(&b->encrypt_buf, c->c2.frame.buf.headroom));

    if (c->c2.tls_multi)
    {
        /* Get the key we will use to encrypt the packet. */
        tls_pre_encrypt(c->c2.tls_multi, &c->c2.buf, &co);
        /* If using P_DATA_V2, prepend the 1-byte opcode and 3-byte peer-id to the
         * packet before openvpn_encrypt(), so we can authenticate the opcode too.
         */
        if (c->c2.buf.len > 0 && c->c2.tls_multi->use_peer_id)
        {
            tls_prepend_opcode_v2(c->c2.tls_multi, &b->encrypt_buf);
        }
    }
    else
    {
        co = &c->c2.crypto_options;
    }

    /* Encrypt and authenticate the packet */
    openvpn_encrypt(&c->c2.buf, b->encrypt_buf, co);

    /* Do packet administration */
    if (c->c2.tls_multi)
    {
        /* P_DATA_V1 opcode is prepended after encryption (unauthenticated) */
        if (c->c2.buf.len > 0 && !c->c2.tls_multi->use_peer_id)
        {
            tls_prepend_opcode_v1(c->c2.tls_multi, &c->c2.buf);
        }
        tls_post_encrypt(c->c2.tls_multi, &c->c2.buf);
    }

    /*
     * Get the address we will be sending the packet to.
     */
    link_socket_get_outgoing_addr(&c->c2.buf, get_link_socket_info(c),
                                  &c->c2.to_link_addr);

    /* if null encryption, copy result to read_tun_buf */
    buffer_turnover(orig_buf, &c->c2.to_link, &c->c2.buf, &b->read_tun_buf);
}
686
687 /*
688 * Should we exit due to session timeout?
689 */
690 static void
691 check_session_timeout(struct context *c)
692 {
693 if (c->options.session_timeout
694 && event_timeout_trigger(&c->c2.session_interval, &c->c2.timeval,
695 ETT_DEFAULT))
696 {
697 msg(M_INFO, "Session timeout, exiting");
698 register_signal(c->sig, SIGTERM, "session-timeout");
699 }
700 }
701
702 /*
703 * Coarse timers work to 1 second resolution.
704 */
static void
process_coarse_timers(struct context *c)
{
    /* flush current packet-id to file once per 60
     * seconds if --replay-persist was specified */
    if (packet_id_persist_enabled(&c->c1.pid_persist)
        && event_timeout_trigger(&c->c2.packet_id_persist_interval, &c->c2.timeval, ETT_DEFAULT))
    {
        packet_id_persist_save(&c->c1.pid_persist);
    }

    /* Should we write timer-triggered status file */
    if (c->c1.status_output
        && event_timeout_trigger(&c->c1.status_output->et, &c->c2.timeval, ETT_DEFAULT))
    {
        check_status_file(c);
    }

    /* process connection establishment items */
    if (event_timeout_trigger(&c->c2.wait_for_connect, &c->c2.timeval, ETT_DEFAULT))
    {
        check_connection_established(c);
    }

    /* see if we should send a push_request (option --pull) */
    if (event_timeout_trigger(&c->c2.push_request_interval, &c->c2.timeval, ETT_DEFAULT))
    {
        check_push_request(c);
    }

    /* process --route options */
    if (event_timeout_trigger(&c->c2.route_wakeup, &c->c2.timeval, ETT_DEFAULT))
    {
        check_add_routes(c);
    }

    /* check if we want to refresh the auth-token */
    if (event_timeout_trigger(&c->c2.auth_token_renewal_interval, &c->c2.timeval, ETT_DEFAULT))
    {
        check_send_auth_token(c);
    }

    /* possibly exit due to --inactive */
    if (c->options.inactivity_timeout
        && event_timeout_trigger(&c->c2.inactivity_interval, &c->c2.timeval, ETT_DEFAULT))
    {
        check_inactivity_timeout(c);
    }

    /* a handler above may have registered a signal; stop processing then */
    if (c->sig->signal_received)
    {
        return;
    }

    /* kill session if time is over */
    check_session_timeout(c);
    if (c->sig->signal_received)
    {
        return;
    }

    /* restart if ping not received */
    check_ping_restart(c);
    if (c->sig->signal_received)
    {
        return;
    }

    if (c->c2.tls_multi)
    {
        if (c->options.ce.connect_timeout
            && event_timeout_trigger(&c->c2.server_poll_interval, &c->c2.timeval, ETT_DEFAULT))
        {
            check_server_poll_timeout(c);
        }
        if (c->sig->signal_received)
        {
            return;
        }
        if (event_timeout_trigger(&c->c2.scheduled_exit, &c->c2.timeval, ETT_DEFAULT))
        {
            check_scheduled_exit(c);
        }
        if (c->sig->signal_received)
        {
            return;
        }
    }

    /* Should we send an OCC_REQUEST message? */
    check_send_occ_req(c);

    /* Should we send an MTU load test? */
    check_send_occ_load_test(c);

    /* Should we send an OCC_EXIT message to remote? */
    if (c->c2.explicit_exit_notification_time_wait)
    {
        process_explicit_exit_notification_timer_wakeup(c);
    }

    /* Should we ping the remote? */
    check_ping_send(c);

#ifdef ENABLE_MANAGEMENT
    if (management)
    {
        management_check_bytecount(c, management, &c->c2.timeval);
    }
#endif /* ENABLE_MANAGEMENT */
}
816
static void
check_coarse_timers(struct context *c)
{
    /* not yet due: just make sure we wake up in time for the next run */
    if (now < c->c2.coarse_timer_wakeup)
    {
        context_reschedule_sec(c, c->c2.coarse_timer_wakeup - now);
        return;
    }

    /* let the coarse-timer handlers bid c->c2.timeval down from
     * BIG_TIMEOUT; restore the caller's timeout afterwards if it was
     * earlier than anything the handlers requested */
    const struct timeval save = c->c2.timeval;
    c->c2.timeval.tv_sec = BIG_TIMEOUT;
    c->c2.timeval.tv_usec = 0;
    process_coarse_timers(c);
    c->c2.coarse_timer_wakeup = now + c->c2.timeval.tv_sec;

    dmsg(D_INTERVAL, "TIMER: coarse timer wakeup %" PRIi64 " seconds", (int64_t)c->c2.timeval.tv_sec);

    /* Is the coarse timeout NOT the earliest one? */
    if (c->c2.timeval.tv_sec > save.tv_sec)
    {
        c->c2.timeval = save;
    }
}
840
/* Recompute the random jitter (0..262143 usec, mask 0x0003FFFF) that gets
 * added to timeouts; refreshed at most once per update_interval seconds. */
static void
check_timeout_random_component_dowork(struct context *c)
{
    const int update_interval = 10; /* seconds */
    c->c2.update_timeout_random_component = now + update_interval;
    c->c2.timeout_random_component.tv_usec = (time_t) get_random() & 0x0003FFFF;
    c->c2.timeout_random_component.tv_sec = 0;

    dmsg(D_INTERVAL, "RANDOM USEC=%ld", (long) c->c2.timeout_random_component.tv_usec);
}
851
/* Add the random jitter to c->c2.timeval for waits of >= 1 second,
 * refreshing the jitter value first when it is stale. */
static inline void
check_timeout_random_component(struct context *c)
{
    if (now >= c->c2.update_timeout_random_component)
    {
        check_timeout_random_component_dowork(c);
    }
    /* only jitter coarse (>= 1s) waits */
    if (c->c2.timeval.tv_sec >= 1)
    {
        tv_add(&c->c2.timeval, &c->c2.timeout_random_component);
    }
}
864
865 /*
866 * Handle addition and removal of the 10-byte Socks5 header
867 * in UDP packets.
868 */
869
/* Strip the 10-byte Socks5 UDP header from an incoming datagram when a
 * SOCKS proxy is in use over UDP; also fixes up the source address. */
static inline void
socks_postprocess_incoming_link(struct context *c)
{
    if (c->c2.link_socket->socks_proxy && c->c2.link_socket->info.proto == PROTO_UDP)
    {
        socks_process_incoming_udp(&c->c2.buf, &c->c2.from);
    }
}
878
/* Prepend the Socks5 UDP header to an outgoing datagram when a SOCKS proxy
 * is in use over UDP.  Adds the header size to *size_delta (undone later by
 * link_socket_write_post_size_adjust) and redirects *to_addr to the relay. */
static inline void
socks_preprocess_outgoing_link(struct context *c,
                               struct link_socket_actual **to_addr,
                               int *size_delta)
{
    if (c->c2.link_socket->socks_proxy && c->c2.link_socket->info.proto == PROTO_UDP)
    {
        *size_delta += socks_process_outgoing_udp(&c->c2.to_link, c->c2.to_link_addr);
        *to_addr = &c->c2.link_socket->socks_relay;
    }
}
890
891 /* undo effect of socks_preprocess_outgoing_link */
/* undo effect of socks_preprocess_outgoing_link */
static inline void
link_socket_write_post_size_adjust(int *size,
                                   int size_delta,
                                   struct buffer *buf)
{
    /* nothing was prepended, or the write was too short to cover it */
    if (size_delta <= 0 || *size <= size_delta)
    {
        return;
    }

    *size -= size_delta;
    if (!buf_advance(buf, size_delta))
    {
        *size = 0;
    }
}
906
907 /*
908 * Output: c->c2.buf
909 */
910
/*
 * Read one datagram/stream chunk from the TCP/UDP socket into c->c2.buf,
 * handling connection resets, port-share redirection, dco-win timeouts
 * and SOCKS decapsulation.
 */
void
read_incoming_link(struct context *c)
{
    /*
     * Set up for recvfrom call to read datagram
     * sent to our TCP/UDP port.
     */
    int status;

    /*ASSERT (!c->c2.to_tun.len);*/

    perf_push(PERF_READ_IN_LINK);

    c->c2.buf = c->c2.buffers->read_link_buf;
    ASSERT(buf_init(&c->c2.buf, c->c2.frame.buf.headroom));

    status = link_socket_read(c->c2.link_socket,
                              &c->c2.buf,
                              &c->c2.from);

    if (socket_connection_reset(c->c2.link_socket, status))
    {
#if PORT_SHARE
        /* hand non-OpenVPN clients off to the port-share server */
        if (port_share && socket_foreign_protocol_detected(c->c2.link_socket))
        {
            const struct buffer *fbuf = socket_foreign_protocol_head(c->c2.link_socket);
            const int sd = socket_foreign_protocol_sd(c->c2.link_socket);
            port_share_redirect(port_share, fbuf, sd);
            register_signal(c->sig, SIGTERM, "port-share-redirect");
        }
        else
#endif
        {
            /* received a disconnect from a connection-oriented protocol */
            if (event_timeout_defined(&c->c2.explicit_exit_notification_interval))
            {
                msg(D_STREAM_ERRORS, "Connection reset during exit notification period, ignoring [%d]", status);
                management_sleep(1);
            }
            else
            {
                register_signal(c->sig, SIGUSR1, "connection-reset"); /* SOFT-SIGUSR1 -- TCP connection reset */
                msg(D_STREAM_ERRORS, "Connection reset, restarting [%d]", status);
            }
        }
        perf_pop();
        return;
    }

    /* check_status() call below resets last-error code */
    bool dco_win_timeout = tuntap_is_dco_win_timeout(c->c1.tuntap, status);

    /* check recvfrom status */
    check_status(status, "read", c->c2.link_socket, NULL);

    if (dco_win_timeout)
    {
        trigger_ping_timeout_signal(c);
    }

    /* Remove socks header if applicable */
    socks_postprocess_incoming_link(c);

    perf_pop();
}
976
/*
 * Stage 1 of incoming link-packet processing: byte accounting, gremlin
 * test corruption, TLS control-channel handling, then authentication and
 * decryption of the data-channel packet in c->c2.buf.
 * Returns the openvpn_decrypt() status (false also when the packet was
 * dropped or consumed earlier in the pipeline).
 */
bool
process_incoming_link_part1(struct context *c, struct link_socket_info *lsi, bool floated)
{
    struct gc_arena gc = gc_new();
    bool decrypt_status = false;

    if (c->c2.buf.len > 0)
    {
        c->c2.link_read_bytes += c->c2.buf.len;
        link_read_bytes_global += c->c2.buf.len;
#ifdef ENABLE_MEMSTATS
        if (mmap_stats)
        {
            mmap_stats->link_read_bytes = link_read_bytes_global;
        }
#endif
        c->c2.original_recv_size = c->c2.buf.len;
#ifdef ENABLE_MANAGEMENT
        if (management)
        {
            management_bytes_client(management, c->c2.buf.len, 0);
            management_bytes_server(management, &c->c2.link_read_bytes, &c->c2.link_write_bytes, &c->c2.mda_context);
        }
#endif
    }
    else
    {
        c->c2.original_recv_size = 0;
    }

#ifdef ENABLE_DEBUG
    /* take action to corrupt packet if we are in gremlin test mode */
    if (c->options.gremlin)
    {
        if (!ask_gremlin(c->options.gremlin))
        {
            c->c2.buf.len = 0;
        }
        corrupt_gremlin(&c->c2.buf, c->options.gremlin);
    }
#endif

    /* log incoming packet */
#ifdef LOG_RW
    if (c->c2.log_rw && c->c2.buf.len > 0)
    {
        fprintf(stderr, "R");
    }
#endif
    msg(D_LINK_RW, "%s READ [%d] from %s: %s",
        proto2ascii(lsi->proto, lsi->af, true),
        BLEN(&c->c2.buf),
        print_link_socket_actual(&c->c2.from, &gc),
        PROTO_DUMP(&c->c2.buf, &gc));

    /*
     * Good, non-zero length packet received.
     * Commence multi-stage processing of packet,
     * such as authenticate, decrypt, decompress.
     * If any stage fails, it sets buf.len to 0 or -1,
     * telling downstream stages to ignore the packet.
     */
    if (c->c2.buf.len > 0)
    {
        struct crypto_options *co = NULL;
        const uint8_t *ad_start = NULL;
        if (!link_socket_verify_incoming_addr(&c->c2.buf, lsi, &c->c2.from))
        {
            link_socket_bad_incoming_addr(&c->c2.buf, lsi, &c->c2.from);
        }

        if (c->c2.tls_multi)
        {
            uint8_t opcode = *BPTR(&c->c2.buf) >> P_OPCODE_SHIFT;

            /*
             * If DCO is enabled, the kernel drivers require that the
             * other end only sends P_DATA_V2 packets. V1 are unknown
             * to kernel and passed to userland, but we cannot handle them
             * either because crypto context is missing - so drop the packet.
             *
             * This can only happen with particular old (2.4.0-2.4.4) servers.
             */
            if ((opcode == P_DATA_V1) && dco_enabled(&c->options))
            {
                msg(D_LINK_ERRORS,
                    "Data Channel Offload doesn't support DATA_V1 packets. "
                    "Upgrade your server to 2.4.5 or newer.");
                c->c2.buf.len = 0;
            }

            /*
             * If tls_pre_decrypt returns true, it means the incoming
             * packet was a good TLS control channel packet. If so, TLS code
             * will deal with the packet and set buf.len to 0 so downstream
             * stages ignore it.
             *
             * If the packet is a data channel packet, tls_pre_decrypt
             * will load crypto_options with the correct encryption key
             * and return false.
             */
            if (tls_pre_decrypt(c->c2.tls_multi, &c->c2.from, &c->c2.buf, &co,
                                floated, &ad_start))
            {
                interval_action(&c->c2.tmp_int);

                /* reset packet received timer if TLS packet */
                if (c->options.ping_rec_timeout)
                {
                    event_timeout_reset(&c->c2.ping_rec_interval);
                }
            }
        }
        else
        {
            co = &c->c2.crypto_options;
        }

        /*
         * Drop non-TLS packet if client-connect script/plugin and cipher selection
         * has not yet succeeded. In non-TLS mode tls_multi is not defined
         * and we always pass packets.
         */
        if (c->c2.tls_multi && c->c2.tls_multi->multi_state < CAS_CONNECT_DONE)
        {
            c->c2.buf.len = 0;
        }

        /* authenticate and decrypt the incoming packet */
        decrypt_status = openvpn_decrypt(&c->c2.buf, c->c2.buffers->decrypt_buf,
                                         co, &c->c2.frame, ad_start);

        if (!decrypt_status && link_socket_connection_oriented(c->c2.link_socket))
        {
            /* decryption errors are fatal in TCP mode */
            register_signal(c->sig, SIGUSR1, "decryption-error"); /* SOFT-SIGUSR1 -- decryption error in TCP mode */
            msg(D_STREAM_ERRORS, "Fatal decryption error (process_incoming_link), restarting");
        }
    }
    else
    {
        buf_reset(&c->c2.to_tun);
    }
    gc_free(&gc);

    return decrypt_status;
}
1124
/*
 * Stage 2 of incoming link-packet processing: fragment reassembly,
 * decompression, ping/OCC message handling, and hand-off of the final
 * payload to c->c2.to_tun for delivery to the tun/tap device.
 */
void
process_incoming_link_part2(struct context *c, struct link_socket_info *lsi, const uint8_t *orig_buf)
{
    if (c->c2.buf.len > 0)
    {
#ifdef ENABLE_FRAGMENT
        if (c->c2.fragment)
        {
            fragment_incoming(c->c2.fragment, &c->c2.buf, &c->c2.frame_fragment);
        }
#endif

#ifdef USE_COMP
        /* decompress the incoming packet */
        if (c->c2.comp_context)
        {
            (*c->c2.comp_context->alg.decompress)(&c->c2.buf, c->c2.buffers->decompress_buf, c->c2.comp_context, &c->c2.frame);
        }
#endif

#ifdef PACKET_TRUNCATION_CHECK
        /* if (c->c2.buf.len > 1) --c->c2.buf.len; */
        ipv4_packet_size_verify(BPTR(&c->c2.buf),
                                BLEN(&c->c2.buf),
                                TUNNEL_TYPE(c->c1.tuntap),
                                "POST_DECRYPT",
                                &c->c2.n_trunc_post_decrypt);
#endif

        /*
         * Set our "official" outgoing address, since
         * if buf.len is non-zero, we know the packet
         * authenticated. In TLS mode we do nothing
         * because TLS mode takes care of source address
         * authentication.
         *
         * Also, update the persisted version of our packet-id.
         */
        if (!TLS_MODE(c) && c->c2.buf.len > 0)
        {
            link_socket_set_outgoing_addr(lsi, &c->c2.from, NULL, c->c2.es);
        }

        /* reset packet received timer */
        if (c->options.ping_rec_timeout && c->c2.buf.len > 0)
        {
            event_timeout_reset(&c->c2.ping_rec_interval);
        }

        /* increment authenticated receive byte count */
        if (c->c2.buf.len > 0)
        {
            c->c2.link_read_bytes_auth += c->c2.buf.len;
            c->c2.max_recv_size_local = max_int(c->c2.original_recv_size, c->c2.max_recv_size_local);
        }

        /* Did we just receive an openvpn ping packet? */
        if (is_ping_msg(&c->c2.buf))
        {
            dmsg(D_PING, "RECEIVED PING PACKET");
            c->c2.buf.len = 0; /* drop packet */
        }

        /* Did we just receive an OCC packet? */
        if (is_occ_msg(&c->c2.buf))
        {
            process_received_occ_msg(c);
        }

        buffer_turnover(orig_buf, &c->c2.to_tun, &c->c2.buf, &c->c2.buffers->read_link_buf);

        /* to_tun defined + unopened tuntap can cause deadlock */
        if (!tuntap_defined(c->c1.tuntap))
        {
            c->c2.to_tun.len = 0;
        }
    }
    else
    {
        buf_reset(&c->c2.to_tun);
    }
}
1207
1208 static void
1209 process_incoming_link(struct context *c)
1210 {
1211 perf_push(PERF_PROC_IN_LINK);
1212
1213 struct link_socket_info *lsi = get_link_socket_info(c);
1214 const uint8_t *orig_buf = c->c2.buf.data;
1215
1216 process_incoming_link_part1(c, lsi, false);
1217 process_incoming_link_part2(c, lsi, orig_buf);
1218
1219 perf_pop();
1220 }
1221
/*
 * Handle a notification message from the data channel offload (DCO)
 * kernel module: peer deletion (e.g. kernel-side ping timeout) or a
 * data-channel key rotation request.  Compiled to a no-op on platforms
 * without DCO support.
 */
static void
process_incoming_dco(struct context *c)
{
#if defined(ENABLE_DCO) && (defined(TARGET_LINUX) || defined(TARGET_FREEBSD))
    dco_context_t *dco = &c->c1.tuntap->dco;

    /* read the pending DCO message into the dco context fields below */
    dco_do_read(dco);

    /* FreeBSD currently sends us removal notification with the old peer-id in
     * p2p mode with the ping timeout reason, so ignore that one to not shoot
     * ourselves in the foot and removing the just established session */
    if (dco->dco_message_peer_id != c->c2.tls_multi->dco_peer_id)
    {
        msg(D_DCO_DEBUG, "%s: received message for mismatching peer-id %d, "
            "expected %d", __func__, dco->dco_message_peer_id,
            c->c2.tls_multi->dco_peer_id);
        return;
    }

    switch (dco->dco_message_type)
    {
        case OVPN_CMD_DEL_PEER:
            /* kernel expired the peer -> treat it like a userspace ping timeout */
            if (dco->dco_del_peer_reason == OVPN_DEL_PEER_REASON_EXPIRED)
            {
                msg(D_DCO_DEBUG, "%s: received peer expired notification of for peer-id "
                    "%d", __func__, dco->dco_message_peer_id);
                trigger_ping_timeout_signal(c);
                return;
            }
            break;

        case OVPN_CMD_SWAP_KEYS:
            /* kernel rotated the data channel keys -> soft-reset the TLS session */
            msg(D_DCO_DEBUG, "%s: received key rotation notification for peer-id %d",
                __func__, dco->dco_message_peer_id);
            tls_session_soft_reset(c->c2.tls_multi);
            break;

        default:
            msg(D_DCO_DEBUG, "%s: received message of type %u - ignoring", __func__,
                dco->dco_message_type);
            return;
    }

#endif /* if defined(ENABLE_DCO) && (defined(TARGET_LINUX) || defined(TARGET_FREEBSD)) */
}
1267
/*
 * Read one packet from the TUN/TAP device.
 *
 * Output: c->c2.buf
 *
 * On fatal device conditions this registers a signal (SIGTERM when the
 * interface was stopped, SIGHUP on aborted I/O) and returns early.
 */

void
read_incoming_tun(struct context *c)
{
    /*
     * Setup for read() call on TUN/TAP device.
     */
    /*ASSERT (!c->c2.to_link.len);*/

    perf_push(PERF_READ_IN_TUN);

    c->c2.buf = c->c2.buffers->read_tun_buf;

#ifdef _WIN32
    if (c->c1.tuntap->windows_driver == WINDOWS_DRIVER_WINTUN)
    {
        read_wintun(c->c1.tuntap, &c->c2.buf);
        if (c->c2.buf.len == -1)
        {
            /* ring-buffer read error is unrecoverable -> schedule a restart */
            register_signal(c->sig, SIGHUP, "tun-abort");
            c->persist.restart_sleep_seconds = 1;
            msg(M_INFO, "Wintun read error, restarting");
            perf_pop();
            return;
        }
    }
    else
    {
        /* collect the result of the pending overlapped read into c->c2.buf */
        sockethandle_t sh = { .is_handle = true, .h = c->c1.tuntap->hand };
        sockethandle_finalize(sh, &c->c1.tuntap->reads, &c->c2.buf, NULL);
    }
#else /* ifdef _WIN32 */
    ASSERT(buf_init(&c->c2.buf, c->c2.frame.buf.headroom));
    ASSERT(buf_safe(&c->c2.buf, c->c2.frame.buf.payload_size));
    c->c2.buf.len = read_tun(c->c1.tuntap, BPTR(&c->c2.buf), c->c2.frame.buf.payload_size);
#endif /* ifdef _WIN32 */

#ifdef PACKET_TRUNCATION_CHECK
    ipv4_packet_size_verify(BPTR(&c->c2.buf),
                            BLEN(&c->c2.buf),
                            TUNNEL_TYPE(c->c1.tuntap),
                            "READ_TUN",
                            &c->c2.n_trunc_tun_read);
#endif

    /* Was TUN/TAP interface stopped? */
    if (tuntap_stop(c->c2.buf.len))
    {
        register_signal(c->sig, SIGTERM, "tun-stop");
        msg(M_INFO, "TUN/TAP interface has been stopped, exiting");
        perf_pop();
        return;
    }

    /* Was TUN/TAP I/O operation aborted? */
    if (tuntap_abort(c->c2.buf.len))
    {
        register_signal(c->sig, SIGHUP, "tun-abort");
        c->persist.restart_sleep_seconds = 10;
        msg(M_INFO, "TUN/TAP I/O operation aborted, restarting");
        perf_pop();
        return;
    }

    /* Check the status return from read() */
    check_status(c->c2.buf.len, "read from TUN/TAP", NULL, c->c1.tuntap);

    perf_pop();
}
1340
1341 /**
1342 * Drops UDP packets which OS decided to route via tun.
1343 *
1344 * On Windows and OS X when netwotk adapter is disabled or
1345 * disconnected, platform starts to use tun as external interface.
1346 * When packet is sent to tun, it comes to openvpn, encapsulated
1347 * and sent to routing table, which sends it again to tun.
1348 */
1349 static void
1350 drop_if_recursive_routing(struct context *c, struct buffer *buf)
1351 {
1352 bool drop = false;
1353 struct openvpn_sockaddr tun_sa;
1354 int ip_hdr_offset = 0;
1355
1356 if (c->c2.to_link_addr == NULL) /* no remote addr known */
1357 {
1358 return;
1359 }
1360
1361 tun_sa = c->c2.to_link_addr->dest;
1362
1363 int proto_ver = get_tun_ip_ver(TUNNEL_TYPE(c->c1.tuntap), &c->c2.buf, &ip_hdr_offset);
1364
1365 if (proto_ver == 4)
1366 {
1367 const struct openvpn_iphdr *pip;
1368
1369 /* make sure we got whole IP header */
1370 if (BLEN(buf) < ((int) sizeof(struct openvpn_iphdr) + ip_hdr_offset))
1371 {
1372 return;
1373 }
1374
1375 /* skip ipv4 packets for ipv6 tun */
1376 if (tun_sa.addr.sa.sa_family != AF_INET)
1377 {
1378 return;
1379 }
1380
1381 pip = (struct openvpn_iphdr *) (BPTR(buf) + ip_hdr_offset);
1382
1383 /* drop packets with same dest addr as gateway */
1384 if (tun_sa.addr.in4.sin_addr.s_addr == pip->daddr)
1385 {
1386 drop = true;
1387 }
1388 }
1389 else if (proto_ver == 6)
1390 {
1391 const struct openvpn_ipv6hdr *pip6;
1392
1393 /* make sure we got whole IPv6 header */
1394 if (BLEN(buf) < ((int) sizeof(struct openvpn_ipv6hdr) + ip_hdr_offset))
1395 {
1396 return;
1397 }
1398
1399 /* skip ipv6 packets for ipv4 tun */
1400 if (tun_sa.addr.sa.sa_family != AF_INET6)
1401 {
1402 return;
1403 }
1404
1405 /* drop packets with same dest addr as gateway */
1406 pip6 = (struct openvpn_ipv6hdr *) (BPTR(buf) + ip_hdr_offset);
1407 if (IN6_ARE_ADDR_EQUAL(&tun_sa.addr.in6.sin6_addr, &pip6->daddr))
1408 {
1409 drop = true;
1410 }
1411 }
1412
1413 if (drop)
1414 {
1415 struct gc_arena gc = gc_new();
1416
1417 c->c2.buf.len = 0;
1418
1419 msg(D_LOW, "Recursive routing detected, drop tun packet to %s",
1420 print_link_socket_actual(c->c2.to_link_addr, &gc));
1421 gc_free(&gc);
1422 }
1423 }
1424
1425 /*
1426 * Input: c->c2.buf
1427 * Output: c->c2.to_link
1428 */
1429
1430 void
1431 process_incoming_tun(struct context *c)
1432 {
1433 struct gc_arena gc = gc_new();
1434
1435 perf_push(PERF_PROC_IN_TUN);
1436
1437 if (c->c2.buf.len > 0)
1438 {
1439 c->c2.tun_read_bytes += c->c2.buf.len;
1440 }
1441
1442 #ifdef LOG_RW
1443 if (c->c2.log_rw && c->c2.buf.len > 0)
1444 {
1445 fprintf(stderr, "r");
1446 }
1447 #endif
1448
1449 /* Show packet content */
1450 dmsg(D_TUN_RW, "TUN READ [%d]", BLEN(&c->c2.buf));
1451
1452 if (c->c2.buf.len > 0)
1453 {
1454 if ((c->options.mode == MODE_POINT_TO_POINT) && (!c->options.allow_recursive_routing))
1455 {
1456 drop_if_recursive_routing(c, &c->c2.buf);
1457 }
1458 /*
1459 * The --passtos and --mssfix options require
1460 * us to examine the IP header (IPv4 or IPv6).
1461 */
1462 unsigned int flags = PIPV4_PASSTOS | PIP_MSSFIX | PIPV4_CLIENT_NAT
1463 | PIPV6_ICMP_NOHOST_CLIENT;
1464 process_ip_header(c, flags, &c->c2.buf);
1465
1466 #ifdef PACKET_TRUNCATION_CHECK
1467 /* if (c->c2.buf.len > 1) --c->c2.buf.len; */
1468 ipv4_packet_size_verify(BPTR(&c->c2.buf),
1469 BLEN(&c->c2.buf),
1470 TUNNEL_TYPE(c->c1.tuntap),
1471 "PRE_ENCRYPT",
1472 &c->c2.n_trunc_pre_encrypt);
1473 #endif
1474
1475 }
1476 if (c->c2.buf.len > 0)
1477 {
1478 encrypt_sign(c, true);
1479 }
1480 else
1481 {
1482 buf_reset(&c->c2.to_link);
1483 }
1484 perf_pop();
1485 gc_free(&gc);
1486 }
1487
/**
 * Forges a IPv6 ICMP packet with a no route to host error code from the
 * IPv6 packet in buf and sends it directly back to the client via the tun
 * device when used on a client and via the link if used on the server.
 *
 * @param c      - The tunnel context the packet belongs to
 *
 * @param buf    - The buf containing the packet for which the icmp6
 *                 unreachable should be constructed.
 *
 * @param client - determines whether to the send packet back via tun or link
 */
void
ipv6_send_icmp_unreachable(struct context *c, struct buffer *buf, bool client)
{
#define MAX_ICMPV6LEN 1280
    struct openvpn_icmp6hdr icmp6out;
    CLEAR(icmp6out);

    /*
     * Get a buffer to the ip packet, is_ipv6 automatically forwards
     * the buffer to the ip packet
     */
    struct buffer inputipbuf = *buf;

    is_ipv6(TUNNEL_TYPE(c->c1.tuntap), &inputipbuf);

    if (BLEN(&inputipbuf) < (int)sizeof(struct openvpn_ipv6hdr))
    {
        return;
    }

    const struct openvpn_ipv6hdr *pip6 = (struct openvpn_ipv6hdr *)BPTR(&inputipbuf);

    /* Copy version, traffic class, flow label from input packet */
    struct openvpn_ipv6hdr pip6out = *pip6;

    pip6out.version_prio = pip6->version_prio;
    /* reply goes back to the original sender */
    pip6out.daddr = pip6->saddr;

    /*
     * Use the IPv6 remote address if we have one, otherwise use a fake one
     * using the remote address is preferred since it makes debugging and
     * understanding where the ICMPv6 error originates easier
     */
    if (c->options.ifconfig_ipv6_remote)
    {
        inet_pton(AF_INET6, c->options.ifconfig_ipv6_remote, &pip6out.saddr);
    }
    else
    {
        inet_pton(AF_INET6, "fe80::7", &pip6out.saddr);
    }

    pip6out.nexthdr = OPENVPN_IPPROTO_ICMPV6;

    /*
     * The ICMPv6 unreachable code worked best in my (arne) tests with Windows,
     * Linux and Android. Windows did not like the administratively prohibited
     * return code (no fast fail)
     */
    icmp6out.icmp6_type = OPENVPN_ICMP6_DESTINATION_UNREACHABLE;
    icmp6out.icmp6_code = OPENVPN_ICMP6_DU_NOROUTE;

    int icmpheader_len = sizeof(struct openvpn_ipv6hdr)
                         + sizeof(struct openvpn_icmp6hdr);
    int totalheader_len = icmpheader_len;

    if (TUNNEL_TYPE(c->c1.tuntap) == DEV_TYPE_TAP)
    {
        totalheader_len += sizeof(struct openvpn_ethhdr);
    }

    /*
     * Calculate size for payload, defined in the standard that the resulting
     * frame should be <= 1280 and have as much as possible of the original
     * packet
     */
    int max_payload_size = min_int(MAX_ICMPV6LEN,
                                   c->c2.frame.tun_mtu - icmpheader_len);
    int payload_len = min_int(max_payload_size, BLEN(&inputipbuf));

    pip6out.payload_len = htons(sizeof(struct openvpn_icmp6hdr) + payload_len);

    /* Construct the packet as outgoing packet back to the client */
    struct buffer *outbuf;
    if (client)
    {
        c->c2.to_tun = c->c2.buffers->aux_buf;
        outbuf = &(c->c2.to_tun);
    }
    else
    {
        c->c2.to_link = c->c2.buffers->aux_buf;
        outbuf = &(c->c2.to_link);
    }
    /* reserve headroom for the headers prepended below */
    ASSERT(buf_init(outbuf, totalheader_len));

    /* Fill the end of the buffer with original packet */
    ASSERT(buf_safe(outbuf, payload_len));
    ASSERT(buf_copy_n(outbuf, &inputipbuf, payload_len));

    /* ICMP Header, copy into buffer to allow checksum calculation */
    ASSERT(buf_write_prepend(outbuf, &icmp6out, sizeof(struct openvpn_icmp6hdr)));

    /* Calculate checksum over the packet and write to header */

    uint16_t new_csum = ip_checksum(AF_INET6, BPTR(outbuf), BLEN(outbuf),
                                    (const uint8_t *)&pip6out.saddr,
                                    (uint8_t *)&pip6out.daddr, OPENVPN_IPPROTO_ICMPV6);
    ((struct openvpn_icmp6hdr *) BPTR(outbuf))->icmp6_cksum = htons(new_csum);


    /* IPv6 Header */
    ASSERT(buf_write_prepend(outbuf, &pip6out, sizeof(struct openvpn_ipv6hdr)));

    /*
     * Tap mode, we also need to create an Ethernet header.
     */
    if (TUNNEL_TYPE(c->c1.tuntap) == DEV_TYPE_TAP)
    {
        if (BLEN(buf) < (int)sizeof(struct openvpn_ethhdr))
        {
            return;
        }

        const struct openvpn_ethhdr *orig_ethhdr = (struct openvpn_ethhdr *) BPTR(buf);

        /* Copy frametype and reverse source/destination for the response */
        struct openvpn_ethhdr ethhdr;
        memcpy(ethhdr.source, orig_ethhdr->dest, OPENVPN_ETH_ALEN);
        memcpy(ethhdr.dest, orig_ethhdr->source, OPENVPN_ETH_ALEN);
        ethhdr.proto = htons(OPENVPN_ETH_P_IPV6);
        ASSERT(buf_write_prepend(outbuf, &ethhdr, sizeof(struct openvpn_ethhdr)));
    }
#undef MAX_ICMPV6LEN
}
1623
/*
 * Examine and possibly rewrite the IP (v4 or v6) header of the packet in
 * *buf.  'flags' names the candidate operations (PIP* constants); each is
 * masked out below unless its corresponding option is actually enabled.
 * May zero buf->len to drop the packet (--block-ipv6 case).
 */
void
process_ip_header(struct context *c, unsigned int flags, struct buffer *buf)
{
    /* strip flags whose corresponding option is not configured */
    if (!c->options.ce.mssfix)
    {
        flags &= ~PIP_MSSFIX;
    }
#if PASSTOS_CAPABILITY
    if (!c->options.passtos)
    {
        flags &= ~PIPV4_PASSTOS;
    }
#endif
    if (!c->options.client_nat)
    {
        flags &= ~PIPV4_CLIENT_NAT;
    }
    if (!c->options.route_gateway_via_dhcp)
    {
        flags &= ~PIPV4_EXTRACT_DHCP_ROUTER;
    }
    if (!c->options.block_ipv6)
    {
        flags &= ~(PIPV6_ICMP_NOHOST_CLIENT | PIPV6_ICMP_NOHOST_SERVER);
    }

    if (buf->len > 0)
    {
        /* is_ipv4()/is_ipv6() advance ipbuf to the start of the IP packet */
        struct buffer ipbuf = *buf;
        if (is_ipv4(TUNNEL_TYPE(c->c1.tuntap), &ipbuf))
        {
#if PASSTOS_CAPABILITY
            /* extract TOS from IP header */
            if (flags & PIPV4_PASSTOS)
            {
                link_socket_extract_tos(c->c2.link_socket, &ipbuf);
            }
#endif

            /* possibly alter the TCP MSS */
            if (flags & PIP_MSSFIX)
            {
                mss_fixup_ipv4(&ipbuf, c->c2.frame.mss_fix);
            }

            /* possibly do NAT on packet */
            if ((flags & PIPV4_CLIENT_NAT) && c->options.client_nat)
            {
                const int direction = (flags & PIP_OUTGOING) ? CN_INCOMING : CN_OUTGOING;
                client_nat_transform(c->options.client_nat, &ipbuf, direction);
            }
            /* possibly extract a DHCP router message */
            if (flags & PIPV4_EXTRACT_DHCP_ROUTER)
            {
                const in_addr_t dhcp_router = dhcp_extract_router_msg(&ipbuf);
                if (dhcp_router)
                {
                    route_list_add_vpn_gateway(c->c1.route_list, c->c2.es, dhcp_router);
                }
            }
        }
        else if (is_ipv6(TUNNEL_TYPE(c->c1.tuntap), &ipbuf))
        {
            /* possibly alter the TCP MSS */
            if (flags & PIP_MSSFIX)
            {
                mss_fixup_ipv6(&ipbuf, c->c2.frame.mss_fix);
            }
            /* --block-ipv6: answer inbound IPv6 with ICMPv6 no-route */
            if (!(flags & PIP_OUTGOING) && (flags
                                            &(PIPV6_ICMP_NOHOST_CLIENT | PIPV6_ICMP_NOHOST_SERVER)))
            {
                ipv6_send_icmp_unreachable(c, buf,
                                           (bool)(flags & PIPV6_ICMP_NOHOST_CLIENT));
                /* Drop the IPv6 packet */
                buf->len = 0;
            }

        }
    }
}
1704
/*
 * Send the packet queued in c->c2.to_link to the remote peer over the
 * TCP/UDP link socket, honoring traffic shaping, TOS setting, Socks5
 * framing and byte accounting.  The buffer is reset before returning.
 *
 * Input: c->c2.to_link
 */

void
process_outgoing_link(struct context *c)
{
    struct gc_arena gc = gc_new();
    int error_code = 0;

    perf_push(PERF_PROC_OUT_LINK);

    if (c->c2.to_link.len > 0 && c->c2.to_link.len <= c->c2.frame.buf.payload_size)
    {
        /*
         * Setup for call to send/sendto which will send
         * packet to remote over the TCP/UDP port.
         */
        int size = 0;
        ASSERT(link_socket_actual_defined(c->c2.to_link_addr));

#ifdef ENABLE_DEBUG
        /* In gremlin-test mode, we may choose to drop this packet */
        if (!c->options.gremlin || ask_gremlin(c->options.gremlin))
#endif
        {
            /*
             * Let the traffic shaper know how many bytes
             * we wrote.
             */
            if (c->options.shaper)
            {
                /* shape on the wire size, including transport overhead */
                int overhead = datagram_overhead(c->c2.to_link_addr->dest.addr.sa.sa_family,
                                                 c->options.ce.proto);
                shaper_wrote_bytes(&c->c2.shaper,
                                   BLEN(&c->c2.to_link) + overhead);
            }

            /*
             * Let the pinger know that we sent a packet.
             */
            if (c->options.ping_send_timeout)
            {
                event_timeout_reset(&c->c2.ping_send_interval);
            }

#if PASSTOS_CAPABILITY
            /* Set TOS */
            link_socket_set_tos(c->c2.link_socket);
#endif

            /* Log packet send */
#ifdef LOG_RW
            if (c->c2.log_rw)
            {
                fprintf(stderr, "W");
            }
#endif
            msg(D_LINK_RW, "%s WRITE [%d] to %s: %s",
                proto2ascii(c->c2.link_socket->info.proto, c->c2.link_socket->info.af, true),
                BLEN(&c->c2.to_link),
                print_link_socket_actual(c->c2.to_link_addr, &gc),
                PROTO_DUMP(&c->c2.to_link, &gc));

            /* Packet send complexified by possible Socks5 usage */
            {
                struct link_socket_actual *to_addr = c->c2.to_link_addr;
                int size_delta = 0;

                /* If Socks5 over UDP, prepend header */
                socks_preprocess_outgoing_link(c, &to_addr, &size_delta);

                /* Send packet */
                size = link_socket_write(c->c2.link_socket, &c->c2.to_link, to_addr);

                /* Undo effect of prepend */
                link_socket_write_post_size_adjust(&size, size_delta, &c->c2.to_link);
            }

            if (size > 0)
            {
                /* update local and global write statistics */
                c->c2.max_send_size_local = max_int(size, c->c2.max_send_size_local);
                c->c2.link_write_bytes += size;
                link_write_bytes_global += size;
#ifdef ENABLE_MEMSTATS
                if (mmap_stats)
                {
                    mmap_stats->link_write_bytes = link_write_bytes_global;
                }
#endif
#ifdef ENABLE_MANAGEMENT
                if (management)
                {
                    management_bytes_client(management, 0, size);
                    management_bytes_server(management, &c->c2.link_read_bytes, &c->c2.link_write_bytes, &c->c2.mda_context);
                }
#endif
            }
        }

        /* Check return status: capture errno before further calls can
         * overwrite it; it is inspected below for ENETUNREACH */
        error_code = openvpn_errno();
        check_status(size, "write", c->c2.link_socket, NULL);

        if (size > 0)
        {
            /* Did we write a different size packet than we intended? */
            if (size != BLEN(&c->c2.to_link))
            {
                msg(D_LINK_ERRORS,
                    "TCP/UDP packet was truncated/expanded on write to %s (tried=%d,actual=%d)",
                    print_link_socket_actual(c->c2.to_link_addr, &gc),
                    BLEN(&c->c2.to_link),
                    size);
            }
        }

        /* if not a ping/control message, indicate activity regarding --inactive parameter */
        if (c->c2.buf.len > 0)
        {
            register_activity(c, size);
        }

        /* for unreachable network and "connecting" state switch to the next host */

        bool unreachable = error_code ==
#ifdef _WIN32
                           WSAENETUNREACH;
#else
                           ENETUNREACH;
#endif
        if (size < 0 && unreachable && c->c2.tls_multi
            && !tls_initial_packet_received(c->c2.tls_multi) && c->options.mode == MODE_POINT_TO_POINT)
        {
            msg(M_INFO, "Network unreachable, restarting");
            register_signal(c->sig, SIGUSR1, "network-unreachable");
        }
    }
    else
    {
        if (c->c2.to_link.len > 0)
        {
            msg(D_LINK_ERRORS, "TCP/UDP packet too large on write to %s (tried=%d,max=%d)",
                print_link_socket_actual(c->c2.to_link_addr, &gc),
                c->c2.to_link.len,
                c->c2.frame.buf.payload_size);
        }
    }

    buf_reset(&c->c2.to_link);

    perf_pop();
    gc_free(&gc);
}
1859
/*
 * Write the packet queued in c->c2.to_tun to the TUN/TAP device, after
 * running process_ip_header() on it (--mssfix, DHCP router extraction,
 * client NAT).  The buffer is reset before returning.
 *
 * Input: c->c2.to_tun
 */

void
process_outgoing_tun(struct context *c)
{
    /*
     * Set up for write() call to TUN/TAP
     * device.
     */
    if (c->c2.to_tun.len <= 0)
    {
        return;
    }

    perf_push(PERF_PROC_OUT_TUN);

    /*
     * The --mssfix option requires
     * us to examine the IP header (IPv4 or IPv6).
     */
    process_ip_header(c,
                      PIP_MSSFIX | PIPV4_EXTRACT_DHCP_ROUTER | PIPV4_CLIENT_NAT | PIP_OUTGOING,
                      &c->c2.to_tun);

    if (c->c2.to_tun.len <= c->c2.frame.buf.payload_size)
    {
        /*
         * Write to TUN/TAP device.
         */
        int size;

#ifdef LOG_RW
        if (c->c2.log_rw)
        {
            fprintf(stderr, "w");
        }
#endif
        dmsg(D_TUN_RW, "TUN WRITE [%d]", BLEN(&c->c2.to_tun));

#ifdef PACKET_TRUNCATION_CHECK
        ipv4_packet_size_verify(BPTR(&c->c2.to_tun),
                                BLEN(&c->c2.to_tun),
                                TUNNEL_TYPE(c->c1.tuntap),
                                "WRITE_TUN",
                                &c->c2.n_trunc_tun_write);
#endif

#ifdef _WIN32
        size = write_tun_buffered(c->c1.tuntap, &c->c2.to_tun);
#else
        size = write_tun(c->c1.tuntap, BPTR(&c->c2.to_tun), BLEN(&c->c2.to_tun));
#endif

        if (size > 0)
        {
            c->c2.tun_write_bytes += size;
        }
        check_status(size, "write to TUN/TAP", NULL, c->c1.tuntap);

        /* check written packet size */
        if (size > 0)
        {
            /* Did we write a different size packet than we intended? */
            if (size != BLEN(&c->c2.to_tun))
            {
                msg(D_LINK_ERRORS,
                    "TUN/TAP packet was destructively fragmented on write to %s (tried=%d,actual=%d)",
                    c->c1.tuntap->actual_name,
                    BLEN(&c->c2.to_tun),
                    size);
            }

            /* indicate activity regarding --inactive parameter */
            register_activity(c, size);
        }
    }
    else
    {
        /*
         * This should never happen, probably indicates some kind
         * of MTU mismatch.
         */
        msg(D_LINK_ERRORS, "tun packet too large on write (tried=%d,max=%d)",
            c->c2.to_tun.len,
            c->c2.frame.buf.payload_size);
    }

    buf_reset(&c->c2.to_tun);

    perf_pop();
}
1953
/*
 * Prepare for the I/O wait: initialize the wait timeout, run coarse
 * timers, drive TLS control channel processing and decide whether
 * OCC/fragment data needs to be sent.  May set c->sig->signal_received,
 * in which case it returns early.
 */
void
pre_select(struct context *c)
{
    /* make sure current time (now) is updated on function entry */

    /*
     * Start with an effectively infinite timeout, then let it
     * reduce to a timeout that reflects the component which
     * needs the earliest service.
     */
    c->c2.timeval.tv_sec = BIG_TIMEOUT;
    c->c2.timeval.tv_usec = 0;

#if defined(_WIN32)
    if (check_debug_level(D_TAP_WIN_DEBUG))
    {
        /* poll every second so driver debug output is flushed regularly */
        c->c2.timeval.tv_sec = 1;
        if (tuntap_defined(c->c1.tuntap))
        {
            tun_show_debug(c->c1.tuntap);
        }
    }
#endif

    /* check coarse timers? */
    check_coarse_timers(c);
    if (c->sig->signal_received)
    {
        return;
    }

    /* If tls is enabled, do tls control channel packet processing. */
    if (c->c2.tls_multi)
    {
        check_tls(c);
    }

    /* In certain cases, TLS errors will require a restart */
    check_tls_errors(c);
    if (c->sig->signal_received)
    {
        return;
    }

    /* check for incoming control messages on the control channel like
     * push request/reply, or authentication failure and 2FA messages */
    if (tls_test_payload_len(c->c2.tls_multi) > 0)
    {
        check_incoming_control_channel(c);
    }

    /* Should we send an OCC message? */
    check_send_occ_msg(c);

#ifdef ENABLE_FRAGMENT
    /* Should we deliver a datagram fragment to remote? */
    if (c->c2.fragment)
    {
        check_fragment(c);
    }
#endif

    /* Update random component of timeout */
    check_timeout_random_component(c);
}
2019
/*
 * Wait for I/O events.  Used for both TCP & UDP sockets
 * in point-to-point mode and for UDP sockets in
 * point-to-multipoint mode.
 *
 * Builds the read/write event masks for the link socket and tun device
 * from 'flags' (IOW_* bits), waits via event_wait(), and encodes the
 * result in c->c2.event_set_status.
 */

void
io_wait_dowork(struct context *c, const unsigned int flags)
{
    unsigned int socket = 0;
    unsigned int tuntap = 0;
    struct event_set_return esr[4];

    /* These shifts all depend on EVENT_READ (=1) and EVENT_WRITE (=2)
     * and are added to the shift. Check openvpn.h for more details.
     */
    static int socket_shift = SOCKET_SHIFT;
    static int tun_shift = TUN_SHIFT;
    static int err_shift = ERR_SHIFT;
#ifdef ENABLE_MANAGEMENT
    static int management_shift = MANAGEMENT_SHIFT;
#endif
#ifdef ENABLE_ASYNC_PUSH
    static int file_shift = FILE_SHIFT;
#endif
#if defined(TARGET_LINUX) || defined(TARGET_FREEBSD)
    static int dco_shift = DCO_SHIFT; /* Event from DCO linux kernel module */
#endif

    /*
     * Decide what kind of events we want to wait for.
     */
    event_reset(c->c2.event_set);

    /*
     * On win32 we use the keyboard or an event object as a source
     * of asynchronous signals.
     */
    if (flags & IOW_WAIT_SIGNAL)
    {
        wait_signal(c->c2.event_set, (void *)&err_shift);
    }

    /*
     * If outgoing data (for TCP/UDP port) pending, wait for ready-to-send
     * status from TCP/UDP port. Otherwise, wait for incoming data on
     * TUN/TAP device.
     */
    if (flags & IOW_TO_LINK)
    {
        if (flags & IOW_SHAPER)
        {
            /*
             * If sending this packet would put us over our traffic shaping
             * quota, don't send -- instead compute the delay we must wait
             * until it will be OK to send the packet.
             */
            int delay = 0;

            /* set traffic shaping delay in microseconds */
            if (c->options.shaper)
            {
                delay = max_int(delay, shaper_delay(&c->c2.shaper));
            }

            if (delay < 1000)
            {
                socket |= EVENT_WRITE;
            }
            else
            {
                /* delay >= 1ms: arm the timeout instead of writing now */
                shaper_soonest_event(&c->c2.timeval, delay);
            }
        }
        else
        {
            socket |= EVENT_WRITE;
        }
    }
    else if (!((flags & IOW_FRAG) && TO_LINK_FRAG(c)))
    {
        if (flags & IOW_READ_TUN)
        {
            tuntap |= EVENT_READ;
        }
    }

    /*
     * If outgoing data (for TUN/TAP device) pending, wait for ready-to-send status
     * from device.  Otherwise, wait for incoming data on TCP/UDP port.
     */
    if (flags & IOW_TO_TUN)
    {
        tuntap |= EVENT_WRITE;
    }
    else
    {
        if (flags & IOW_READ_LINK)
        {
            socket |= EVENT_READ;
        }
    }

    /*
     * outgoing bcast buffer waiting to be sent?
     */
    if (flags & IOW_MBUF)
    {
        socket |= EVENT_WRITE;
    }

    /*
     * Force wait on TUN input, even if also waiting on TCP/UDP output
     */
    if (flags & IOW_READ_TUN_FORCE)
    {
        tuntap |= EVENT_READ;
    }

#ifdef _WIN32
    if (tuntap_is_wintun(c->c1.tuntap))
    {
        /*
         * With wintun we are only interested in read event. Ring buffer is
         * always ready for write, so we don't do wait.
         */
        tuntap = EVENT_READ;
    }
#endif

    /*
     * Configure event wait based on socket, tuntap flags.
     */
    socket_set(c->c2.link_socket, c->c2.event_set, socket, (void *)&socket_shift, NULL);
    tun_set(c->c1.tuntap, c->c2.event_set, tuntap, (void *)&tun_shift, NULL);
#if defined(TARGET_LINUX) || defined(TARGET_FREEBSD)
    if (socket & EVENT_READ && c->c2.did_open_tun)
    {
        dco_event_set(&c->c1.tuntap->dco, c->c2.event_set, (void *)&dco_shift);
    }
#endif

#ifdef ENABLE_MANAGEMENT
    if (management)
    {
        management_socket_set(management, c->c2.event_set, (void *)&management_shift, NULL);
    }
#endif

#ifdef ENABLE_ASYNC_PUSH
    /* arm inotify watcher */
    if (c->options.mode == MODE_SERVER)
    {
        event_ctl(c->c2.event_set, c->c2.inotify_fd, EVENT_READ, (void *)&file_shift);
    }
#endif

    /*
     * Possible scenarios:
     *  (1) tcp/udp port has data available to read
     *  (2) tcp/udp port is ready to accept more data to write
     *  (3) tun dev has data available to read
     *  (4) tun dev is ready to accept more data to write
     *  (5) we received a signal (handler sets signal_received)
     *  (6) timeout (tv) expired
     */

    c->c2.event_set_status = ES_ERROR;

    if (!c->sig->signal_received)
    {
        if (!(flags & IOW_CHECK_RESIDUAL) || !socket_read_residual(c->c2.link_socket))
        {
            int status;

#ifdef ENABLE_DEBUG
            if (check_debug_level(D_EVENT_WAIT))
            {
                show_wait_status(c);
            }
#endif

            /*
             * Wait for something to happen.
             */
            status = event_wait(c->c2.event_set, &c->c2.timeval, esr, SIZE(esr));

            check_status(status, "event_wait", NULL, NULL);

            if (status > 0)
            {
                int i;
                c->c2.event_set_status = 0;
                for (i = 0; i < status; ++i)
                {
                    const struct event_set_return *e = &esr[i];
                    /* fold each event's read/write bits into the status word,
                     * positioned by the per-source shift stored in e->arg */
                    c->c2.event_set_status |= ((e->rwflags & 3) << *((int *)e->arg));
                }
            }
            else if (status == 0)
            {
                c->c2.event_set_status = ES_TIMEOUT;
            }
        }
        else
        {
            /* residual data already buffered on the socket -> skip the wait */
            c->c2.event_set_status = SOCKET_READ;
        }
    }

    /* 'now' should always be a reasonably up-to-date timestamp */
    update_time();

    /* set signal_received if a signal was received */
    if (c->c2.event_set_status & ES_ERROR)
    {
        get_signal(&c->sig->signal_received);
    }

    dmsg(D_EVENT_WAIT, "I/O WAIT status=0x%04x", c->c2.event_set_status);
}
2241
2242 void
2243 process_io(struct context *c)
2244 {
2245 const unsigned int status = c->c2.event_set_status;
2246
2247 #ifdef ENABLE_MANAGEMENT
2248 if (status & (MANAGEMENT_READ|MANAGEMENT_WRITE))
2249 {
2250 ASSERT(management);
2251 management_io(management);
2252 }
2253 #endif
2254
2255 /* TCP/UDP port ready to accept write */
2256 if (status & SOCKET_WRITE)
2257 {
2258 process_outgoing_link(c);
2259 }
2260 /* TUN device ready to accept write */
2261 else if (status & TUN_WRITE)
2262 {
2263 process_outgoing_tun(c);
2264 }
2265 /* Incoming data on TCP/UDP port */
2266 else if (status & SOCKET_READ)
2267 {
2268 read_incoming_link(c);
2269 if (!IS_SIG(c))
2270 {
2271 process_incoming_link(c);
2272 }
2273 }
2274 /* Incoming data on TUN device */
2275 else if (status & TUN_READ)
2276 {
2277 read_incoming_tun(c);
2278 if (!IS_SIG(c))
2279 {
2280 process_incoming_tun(c);
2281 }
2282 }
2283 else if (status & DCO_READ)
2284 {
2285 if (!IS_SIG(c))
2286 {
2287 process_incoming_dco(c);
2288 }
2289 }
2290 }