]> git.ipfire.org Git - thirdparty/openvpn.git/blob - src/openvpn/reliable.c
Update Copyright statements to 2024
[thirdparty/openvpn.git] / src / openvpn / reliable.c
1 /*
2 * OpenVPN -- An application to securely tunnel IP networks
3 * over a single UDP port, with support for SSL/TLS-based
4 * session authentication and key exchange,
5 * packet encryption, packet authentication, and
6 * packet compression.
7 *
8 * Copyright (C) 2002-2024 OpenVPN Inc <sales@openvpn.net>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2
12 * as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
22 */
23
24 /*
25 * These routines implement a reliability layer on top of UDP,
26 * so that SSL/TLS can be run over UDP.
27 */
28
29 #ifdef HAVE_CONFIG_H
30 #include "config.h"
31 #endif
32
33 #include "syshead.h"
34
35 #include "buffer.h"
36 #include "error.h"
37 #include "common.h"
38 #include "reliable.h"
39
40 #include "memdbg.h"
41
42 /* calculates test - base while allowing for base or test wraparound. test is
43 * assumed to be higher than base */
static inline packet_id_type
subtract_pid(const packet_id_type test, const packet_id_type base)
{
    /* Unsigned subtraction wraps modulo the width of packet_id_type, so the
     * result is the forward distance from base to test even when the id
     * counter has wrapped around in between. */
    return test - base;
}
49
50 /*
51 * verify that test - base < extent while allowing for base or test wraparound
52 */
static inline bool
reliable_pid_in_range1(const packet_id_type test,
                       const packet_id_type base,
                       const unsigned int extent)
{
    /* true iff the wraparound-safe forward distance from base to test
     * is strictly below extent */
    return subtract_pid(test, base) < extent;
}
60
61 /*
62 * verify that test < base + extent while allowing for base or test wraparound
63 */
64 static inline bool
65 reliable_pid_in_range2(const packet_id_type test,
66 const packet_id_type base,
67 const unsigned int extent)
68 {
69 if (base + extent >= base)
70 {
71 if (test < base + extent)
72 {
73 return true;
74 }
75 }
76 else
77 {
78 if ((test+0x80000000u) < (base+0x80000000u) + extent)
79 {
80 return true;
81 }
82 }
83
84 return false;
85 }
86
87 /*
88 * verify that p1 < p2 while allowing for p1 or p2 wraparound
89 */
90 static inline bool
91 reliable_pid_min(const packet_id_type p1,
92 const packet_id_type p2)
93 {
94 return !reliable_pid_in_range1(p1, p2, 0x80000000u);
95 }
96
97 /* check if a particular packet_id is present in ack */
98 static inline bool
99 reliable_ack_packet_id_present(struct reliable_ack *ack, packet_id_type pid)
100 {
101 int i;
102 for (i = 0; i < ack->len; ++i)
103 {
104 if (ack->packet_id[i] == pid)
105 {
106 return true;
107 }
108 }
109 return false;
110 }
111
112 /* get a packet_id from buf */
113 bool
114 reliable_ack_read_packet_id(struct buffer *buf, packet_id_type *pid)
115 {
116 packet_id_type net_pid;
117
118 if (buf_read(buf, &net_pid, sizeof(net_pid)))
119 {
120 *pid = ntohpid(net_pid);
121 dmsg(D_REL_DEBUG, "ACK read ID " packet_id_format " (buf->len=%d)",
122 (packet_id_print_type)*pid, buf->len);
123 return true;
124 }
125
126 dmsg(D_REL_LOW, "ACK read ID FAILED (buf->len=%d)", buf->len);
127 return false;
128 }
129
130 /* acknowledge a packet_id by adding it to a struct reliable_ack */
131 bool
132 reliable_ack_acknowledge_packet_id(struct reliable_ack *ack, packet_id_type pid)
133 {
134 if (!reliable_ack_packet_id_present(ack, pid) && ack->len < RELIABLE_ACK_SIZE)
135 {
136 ack->packet_id[ack->len++] = pid;
137 dmsg(D_REL_DEBUG, "ACK acknowledge ID " packet_id_format " (ack->len=%d)",
138 (packet_id_print_type)pid, ack->len);
139 return true;
140 }
141
142 dmsg(D_REL_LOW, "ACK acknowledge ID " packet_id_format " FAILED (ack->len=%d)",
143 (packet_id_print_type)pid, ack->len);
144 return false;
145 }
146
147
148 bool
149 reliable_ack_read(struct reliable_ack *ack,
150 struct buffer *buf, const struct session_id *sid)
151 {
152 struct session_id session_id_remote;
153
154 if (!reliable_ack_parse(buf, ack, &session_id_remote))
155 {
156 return false;
157 }
158
159 if (ack->len >= 1 && (!session_id_defined(&session_id_remote)
160 || !session_id_equal(&session_id_remote, sid)))
161 {
162 struct gc_arena gc = gc_new();
163 dmsg(D_REL_LOW,
164 "ACK read BAD SESSION-ID FROM REMOTE, local=%s, remote=%s",
165 session_id_print(sid, &gc), session_id_print(&session_id_remote, &gc));
166 gc_free(&gc);
167 return false;
168 }
169 return true;
170 }
171
172 bool
173 reliable_ack_parse(struct buffer *buf, struct reliable_ack *ack,
174 struct session_id *session_id_remote)
175 {
176 uint8_t count;
177 ack->len = 0;
178
179 if (!buf_read(buf, &count, sizeof(count)))
180 {
181 return false;
182 }
183 for (int i = 0; i < count; ++i)
184 {
185 packet_id_type net_pid;
186 if (!buf_read(buf, &net_pid, sizeof(net_pid)))
187 {
188 return false;
189 }
190 if (ack->len >= RELIABLE_ACK_SIZE)
191 {
192 return false;
193 }
194 packet_id_type pid = ntohpid(net_pid);
195 ack->packet_id[ack->len++] = pid;
196 }
197 if (count)
198 {
199 if (!session_id_read(session_id_remote, buf))
200 {
201 return false;
202 }
203 }
204 return true;
205 }
206
/**
 * Copies the first n acks from \c ack to \c ack_mru
 *
 * Each copied id is moved to (or inserted at) the front of \c ack_mru,
 * so \c ack_mru keeps a most-recently-used ordering of acknowledged ids
 * without duplicates.
 */
void
copy_acks_to_mru(struct reliable_ack *ack, struct reliable_ack *ack_mru, int n)
{
    ASSERT(ack->len >= n);
    /* This loop is backward to ensure the same order as in ack */
    for (int i = n-1; i >= 0; i--)
    {
        packet_id_type id = ack->packet_id[i];

        /* Handle special case of ack_mru empty */
        if (ack_mru->len == 0)
        {
            ack_mru->len = 1;
            ack_mru->packet_id[0] = id;
        }

        bool idfound = false;

        /* Move all existing entries one to the right */
        packet_id_type move = id;

        for (int j = 0; j < ack_mru->len; j++)
        {
            packet_id_type tmp = ack_mru->packet_id[j];
            ack_mru->packet_id[j] = move;
            move = tmp;

            /* if the shifted-out value is the id we just inserted at the
             * front, the id was already present: stop shifting so the
             * duplicate is dropped */
            if (move == id)
            {
                idfound = true;
                break;
            }
        }

        /* id was not present before: the last shifted-out entry still
         * needs a slot; append it if capacity allows (otherwise the
         * oldest entry falls off the end) */
        if (!idfound && ack_mru->len < RELIABLE_ACK_SIZE)
        {
            ack_mru->packet_id[ack_mru->len] = move;
            ack_mru->len++;
        }
    }
}
251
252 /* write a packet ID acknowledgement record to buf, */
253 /* removing all acknowledged entries from ack */
bool
reliable_ack_write(struct reliable_ack *ack,
                   struct reliable_ack *ack_mru,
                   struct buffer *buf,
                   const struct session_id *sid, int max, bool prepend)
{
    int i, j;
    uint8_t n;
    struct buffer sub;

    /* n = number of fresh acks consumed from 'ack' this round, capped at max */
    n = ack->len;
    if (n > max)
    {
        n = max;
    }

    /* remember the outgoing acks in the MRU list so future packets can
     * opportunistically re-send them */
    copy_acks_to_mru(ack, ack_mru, n);

    /* Number of acks we can resend that still fit into the packet */
    uint8_t total_acks = min_int(max, ack_mru->len);

    /* reserve space in 'buf' (prepended or appended) for the ack record */
    sub = buf_sub(buf, ACK_SIZE(total_acks), prepend);
    if (!BDEF(&sub))
    {
        goto error;
    }
    ASSERT(buf_write_u8(&sub, total_acks));

    /* Write the actual acks to the packets. Since we copied the acks that
     * are going out now already to the front of ack_mru we can fetch all
     * acks from ack_mru */
    for (i = 0; i < total_acks; ++i)
    {
        packet_id_type pid = ack_mru->packet_id[i];
        packet_id_type net_pid = htonpid(pid);
        ASSERT(buf_write(&sub, &net_pid, sizeof(net_pid)));
        dmsg(D_REL_DEBUG, "ACK write ID " packet_id_format " (ack->len=%d, n=%d)", (packet_id_print_type)pid, ack->len, n);
    }
    if (total_acks)
    {
        /* a non-empty ack record carries our session id */
        ASSERT(session_id_defined(sid));
        ASSERT(session_id_write(sid, &sub));
    }
    if (n)
    {
        /* compact 'ack' in place: drop the n entries just written and
         * shift the unsent tail to the front */
        for (i = 0, j = n; j < ack->len; )
        {
            ack->packet_id[i++] = ack->packet_id[j++];
        }
        ack->len = i;
    }

    return true;

error:
    return false;
}
311
312 /* print a reliable ACK record coming off the wire */
313 const char *
314 reliable_ack_print(struct buffer *buf, bool verbose, struct gc_arena *gc)
315 {
316 int i;
317 uint8_t n_ack;
318 struct session_id sid_ack;
319 packet_id_type pid;
320 struct buffer out = alloc_buf_gc(256, gc);
321
322 buf_printf(&out, "[");
323 if (!buf_read(buf, &n_ack, sizeof(n_ack)))
324 {
325 goto done;
326 }
327 for (i = 0; i < n_ack; ++i)
328 {
329 if (!buf_read(buf, &pid, sizeof(pid)))
330 {
331 goto done;
332 }
333 pid = ntohpid(pid);
334 buf_printf(&out, " " packet_id_format, (packet_id_print_type)pid);
335 }
336 if (n_ack)
337 {
338 if (!session_id_read(&sid_ack, buf))
339 {
340 goto done;
341 }
342 if (verbose)
343 {
344 buf_printf(&out, " sid=%s", session_id_print(&sid_ack, gc));
345 }
346 }
347
348 done:
349 buf_printf(&out, " ]");
350 return BSTR(&out);
351 }
352
353 /*
354 * struct reliable member functions.
355 */
356
357 void
358 reliable_init(struct reliable *rel, int buf_size, int offset, int array_size, bool hold)
359 {
360 int i;
361
362 CLEAR(*rel);
363 ASSERT(array_size > 0 && array_size <= RELIABLE_CAPACITY);
364 rel->hold = hold;
365 rel->size = array_size;
366 rel->offset = offset;
367 for (i = 0; i < rel->size; ++i)
368 {
369 struct reliable_entry *e = &rel->array[i];
370 e->buf = alloc_buf(buf_size);
371 ASSERT(buf_init(&e->buf, offset));
372 }
373 }
374
375 void
376 reliable_free(struct reliable *rel)
377 {
378 if (!rel)
379 {
380 return;
381 }
382 int i;
383 for (i = 0; i < rel->size; ++i)
384 {
385 struct reliable_entry *e = &rel->array[i];
386 free_buf(&e->buf);
387 }
388 free(rel);
389 }
390
391 /* no active buffers? */
392 bool
393 reliable_empty(const struct reliable *rel)
394 {
395 int i;
396 for (i = 0; i < rel->size; ++i)
397 {
398 const struct reliable_entry *e = &rel->array[i];
399 if (e->active)
400 {
401 return false;
402 }
403 }
404 return true;
405 }
406
407 /* del acknowledged items from send buf */
void
reliable_send_purge(struct reliable *rel, const struct reliable_ack *ack)
{
    int i, j;
    /* for each acknowledged id, deactivate the matching send-buffer entry */
    for (i = 0; i < ack->len; ++i)
    {
        packet_id_type pid = ack->packet_id[i];
        for (j = 0; j < rel->size; ++j)
        {
            struct reliable_entry *e = &rel->array[j];
            if (e->active && e->packet_id == pid)
            {
                dmsg(D_REL_DEBUG,
                     "ACK received for pid " packet_id_format ", deleting from send buffer",
                     (packet_id_print_type)pid);
#if 0
                /* DEBUGGING -- how close were we timing out on ACK failure and resending? */
                {
                    if (e->next_try)
                    {
                        const interval_t wake = e->next_try - now;
                        msg(M_INFO, "ACK " packet_id_format ", wake=%d", pid, wake);
                    }
                }
#endif
                e->active = false;
            }
            else if (e->active && e->packet_id < pid)
            {
                /* We have received an ACK for a packet with a higher PID. Either
                 * we have received ACKs out of or order or the packet has been
                 * lost. We count the number of ACKs to determine if we should
                 * resend it early. */
                /* NOTE(review): this '<' is not wraparound-aware, unlike
                 * reliable_pid_min() — presumably fine because per-session
                 * ids start near zero; confirm before reusing elsewhere. */
                e->n_acks++;
            }
        }
    }
}
446
447 #ifdef ENABLE_DEBUG
448 /* print the current sequence of active packet IDs */
449 static const char *
450 reliable_print_ids(const struct reliable *rel, struct gc_arena *gc)
451 {
452 struct buffer out = alloc_buf_gc(256, gc);
453 int i;
454
455 buf_printf(&out, "[" packet_id_format "]", (packet_id_print_type)rel->packet_id);
456 for (i = 0; i < rel->size; ++i)
457 {
458 const struct reliable_entry *e = &rel->array[i];
459 if (e->active)
460 {
461 buf_printf(&out, " " packet_id_format, (packet_id_print_type)e->packet_id);
462 }
463 }
464 return BSTR(&out);
465 }
466 #endif /* ENABLE_DEBUG */
467
468 /* true if at least one free buffer available */
469 bool
470 reliable_can_get(const struct reliable *rel)
471 {
472 struct gc_arena gc = gc_new();
473 int i;
474 for (i = 0; i < rel->size; ++i)
475 {
476 const struct reliable_entry *e = &rel->array[i];
477 if (!e->active)
478 {
479 return true;
480 }
481 }
482 dmsg(D_REL_LOW, "ACK no free receive buffer available: %s", reliable_print_ids(rel, &gc));
483 gc_free(&gc);
484 return false;
485 }
486
487 /* make sure that incoming packet ID isn't a replay */
bool
reliable_not_replay(const struct reliable *rel, packet_id_type id)
{
    struct gc_arena gc = gc_new();
    int i;
    /* ids behind the next expected packet_id have already been consumed */
    if (reliable_pid_min(id, rel->packet_id))
    {
        goto bad;
    }
    /* an active entry with the same id means this packet is already queued */
    for (i = 0; i < rel->size; ++i)
    {
        const struct reliable_entry *e = &rel->array[i];
        if (e->active && e->packet_id == id)
        {
            goto bad;
        }
    }
    gc_free(&gc);
    return true;

bad:
    dmsg(D_REL_DEBUG, "ACK " packet_id_format " is a replay: %s", (packet_id_print_type)id, reliable_print_ids(rel, &gc));
    gc_free(&gc);
    return false;
}
513
514 /* make sure that incoming packet ID won't deadlock the receive buffer */
515 bool
516 reliable_wont_break_sequentiality(const struct reliable *rel, packet_id_type id)
517 {
518 struct gc_arena gc = gc_new();
519
520 const int ret = reliable_pid_in_range2(id, rel->packet_id, rel->size);
521
522 if (!ret)
523 {
524 dmsg(D_REL_LOW, "ACK " packet_id_format " breaks sequentiality: %s",
525 (packet_id_print_type)id, reliable_print_ids(rel, &gc));
526 }
527
528 dmsg(D_REL_DEBUG, "ACK RWBS rel->size=%d rel->packet_id=%08x id=%08x ret=%d", rel->size, rel->packet_id, id, ret);
529
530 gc_free(&gc);
531 return ret;
532 }
533
534 /* grab a free buffer */
535 struct buffer *
536 reliable_get_buf(struct reliable *rel)
537 {
538 int i;
539 for (i = 0; i < rel->size; ++i)
540 {
541 struct reliable_entry *e = &rel->array[i];
542 if (!e->active)
543 {
544 ASSERT(buf_init(&e->buf, rel->offset));
545 return &e->buf;
546 }
547 }
548 return NULL;
549 }
550
551 int
552 reliable_get_num_output_sequenced_available(struct reliable *rel)
553 {
554 struct gc_arena gc = gc_new();
555 packet_id_type min_id = 0;
556 bool min_id_defined = false;
557
558 /* find minimum active packet_id */
559 for (int i = 0; i < rel->size; ++i)
560 {
561 const struct reliable_entry *e = &rel->array[i];
562 if (e->active)
563 {
564 if (!min_id_defined || reliable_pid_min(e->packet_id, min_id))
565 {
566 min_id_defined = true;
567 min_id = e->packet_id;
568 }
569 }
570 }
571
572 int ret = rel->size;
573 if (min_id_defined)
574 {
575 ret -= subtract_pid(rel->packet_id, min_id);
576 }
577 gc_free(&gc);
578 return ret;
579 }
580
581 /* grab a free buffer, fail if buffer clogged by unacknowledged low packet IDs */
582 struct buffer *
583 reliable_get_buf_output_sequenced(struct reliable *rel)
584 {
585 struct gc_arena gc = gc_new();
586 int i;
587 packet_id_type min_id = 0;
588 bool min_id_defined = false;
589 struct buffer *ret = NULL;
590
591 /* find minimum active packet_id */
592 for (i = 0; i < rel->size; ++i)
593 {
594 const struct reliable_entry *e = &rel->array[i];
595 if (e->active)
596 {
597 if (!min_id_defined || reliable_pid_min(e->packet_id, min_id))
598 {
599 min_id_defined = true;
600 min_id = e->packet_id;
601 }
602 }
603 }
604
605 if (!min_id_defined || reliable_pid_in_range1(rel->packet_id, min_id, rel->size))
606 {
607 ret = reliable_get_buf(rel);
608 }
609 else
610 {
611 dmsg(D_REL_LOW, "ACK output sequence broken: %s", reliable_print_ids(rel, &gc));
612 }
613 gc_free(&gc);
614 return ret;
615 }
616
617 /* get active buffer for next sequentially increasing key ID */
618 struct reliable_entry *
619 reliable_get_entry_sequenced(struct reliable *rel)
620 {
621 int i;
622 for (i = 0; i < rel->size; ++i)
623 {
624 struct reliable_entry *e = &rel->array[i];
625 if (e->active && e->packet_id == rel->packet_id)
626 {
627 return e;
628 }
629 }
630 return NULL;
631 }
632
633 /* return true if reliable_send would return a non-NULL result */
634 bool
635 reliable_can_send(const struct reliable *rel)
636 {
637 struct gc_arena gc = gc_new();
638 int i;
639 int n_active = 0, n_current = 0;
640 for (i = 0; i < rel->size; ++i)
641 {
642 const struct reliable_entry *e = &rel->array[i];
643 if (e->active)
644 {
645 ++n_active;
646 if (now >= e->next_try || e->n_acks >= N_ACK_RETRANSMIT)
647 {
648 ++n_current;
649 }
650 }
651 }
652 dmsg(D_REL_DEBUG, "ACK reliable_can_send active=%d current=%d : %s",
653 n_active,
654 n_current,
655 reliable_print_ids(rel, &gc));
656
657 gc_free(&gc);
658 return n_current > 0 && !rel->hold;
659 }
660
661 /* return next buffer to send to remote */
struct buffer *
reliable_send(struct reliable *rel, int *opcode)
{
    int i;
    struct reliable_entry *best = NULL;
    const time_t local_now = now;

    /* pick the due entry with the lowest (wraparound-aware) packet id */
    for (i = 0; i < rel->size; ++i)
    {
        struct reliable_entry *e = &rel->array[i];

        /* If N_ACK_RETRANSMIT later packets have received ACKs, we assume
         * that the packet was lost and resend it even if the timeout has
         * not expired yet. */
        if (e->active
            && (e->n_acks >= N_ACK_RETRANSMIT || local_now >= e->next_try))
        {
            if (!best || reliable_pid_min(e->packet_id, best->packet_id))
            {
                best = e;
            }
        }
    }
    if (best)
    {
        /* exponential backoff */
        best->next_try = local_now + best->timeout;
        best->timeout *= 2;
        /* reset the early-retransmit counter now that we are resending */
        best->n_acks = 0;
        *opcode = best->opcode;
        dmsg(D_REL_DEBUG, "ACK reliable_send ID " packet_id_format " (size=%d to=%d)",
             (packet_id_print_type)best->packet_id, best->buf.len,
             (int)(best->next_try - local_now));
        return &best->buf;
    }
    return NULL;
}
699
700 /* schedule all pending packets for immediate retransmit */
701 void
702 reliable_schedule_now(struct reliable *rel)
703 {
704 int i;
705 dmsg(D_REL_DEBUG, "ACK reliable_schedule_now");
706 rel->hold = false;
707 for (i = 0; i < rel->size; ++i)
708 {
709 struct reliable_entry *e = &rel->array[i];
710 if (e->active)
711 {
712 e->next_try = now;
713 e->timeout = rel->initial_timeout;
714 }
715 }
716 }
717
718 /* in how many seconds should we wake up to check for timeout */
719 /* if we return BIG_TIMEOUT, nothing to wait for */
720 interval_t
721 reliable_send_timeout(const struct reliable *rel)
722 {
723 struct gc_arena gc = gc_new();
724 interval_t ret = BIG_TIMEOUT;
725 int i;
726 const time_t local_now = now;
727
728 for (i = 0; i < rel->size; ++i)
729 {
730 const struct reliable_entry *e = &rel->array[i];
731 if (e->active)
732 {
733 if (e->next_try <= local_now)
734 {
735 ret = 0;
736 break;
737 }
738 else
739 {
740 ret = min_int(ret, e->next_try - local_now);
741 }
742 }
743 }
744
745 dmsg(D_REL_DEBUG, "ACK reliable_send_timeout %d %s",
746 (int) ret,
747 reliable_print_ids(rel, &gc));
748
749 gc_free(&gc);
750 return ret;
751 }
752
753 /*
754 * Enable an incoming buffer previously returned by a get function as active.
755 */
756
void
reliable_mark_active_incoming(struct reliable *rel, struct buffer *buf,
                              packet_id_type pid, int opcode)
{
    int i;
    /* locate the slot owning 'buf'; it must have come from reliable_get_buf() */
    for (i = 0; i < rel->size; ++i)
    {
        struct reliable_entry *e = &rel->array[i];
        if (buf == &e->buf)
        {
            e->active = true;

            /* packets may not arrive in sequential order */
            e->packet_id = pid;

            /* check for replay */
            ASSERT(!reliable_pid_min(pid, rel->packet_id));

            e->opcode = opcode;
            /* clear retransmit bookkeeping for the received packet */
            e->next_try = 0;
            e->timeout = 0;
            e->n_acks = 0;
            dmsg(D_REL_DEBUG, "ACK mark active incoming ID " packet_id_format, (packet_id_print_type)e->packet_id);
            return;
        }
    }
    ASSERT(0); /* buf not found in rel */
}
785
786 /*
787 * Enable an outgoing buffer previously returned by a get function as active.
788 */
789
void
reliable_mark_active_outgoing(struct reliable *rel, struct buffer *buf, int opcode)
{
    int i;
    /* locate the slot owning 'buf'; it must have come from a get function */
    for (i = 0; i < rel->size; ++i)
    {
        struct reliable_entry *e = &rel->array[i];
        if (buf == &e->buf)
        {
            /* Write mode, increment packet_id (i.e. sequence number)
             * linearly and prepend id to packet */
            packet_id_type net_pid;
            e->packet_id = rel->packet_id++;
            net_pid = htonpid(e->packet_id);
            ASSERT(buf_write_prepend(buf, &net_pid, sizeof(net_pid)));
            e->active = true;
            e->opcode = opcode;
            /* next_try = 0 makes the entry immediately due in reliable_send() */
            e->next_try = 0;
            e->timeout = rel->initial_timeout;
            dmsg(D_REL_DEBUG, "ACK mark active outgoing ID " packet_id_format, (packet_id_print_type)e->packet_id);
            return;
        }
    }
    ASSERT(0); /* buf not found in rel */
}
815
816 /* delete a buffer previously activated by reliable_mark_active() */
817 void
818 reliable_mark_deleted(struct reliable *rel, struct buffer *buf)
819 {
820 int i;
821 for (i = 0; i < rel->size; ++i)
822 {
823 struct reliable_entry *e = &rel->array[i];
824 if (buf == &e->buf)
825 {
826 e->active = false;
827 rel->packet_id = e->packet_id + 1;
828 return;
829 }
830 }
831 ASSERT(0);
832 }
833
#if 0

/* Disabled debugging helpers: dump the contents of a reliable_ack or
 * reliable structure to stdout.  Kept for manual debugging; flip the
 * "#if 0" above to compile them in. */

void
reliable_ack_debug_print(const struct reliable_ack *ack, char *desc)
{
    int i;

    printf("********* struct reliable_ack %s\n", desc);
    for (i = 0; i < ack->len; ++i)
    {
        printf(" %d: " packet_id_format "\n", i, (packet_id_print_type) ack->packet_id[i]);
    }
}

void
reliable_debug_print(const struct reliable *rel, char *desc)
{
    int i;
    update_time();

    printf("********* struct reliable %s\n", desc);
    printf(" initial_timeout=%d\n", (int)rel->initial_timeout);
    printf(" packet_id=" packet_id_format "\n", rel->packet_id);
    printf(" now=%" PRIi64 "\n", (int64_t)now);
    for (i = 0; i < rel->size; ++i)
    {
        const struct reliable_entry *e = &rel->array[i];
        if (e->active)
        {
            printf(" %d: packet_id=" packet_id_format " len=%d", i, e->packet_id, e->buf.len);
            printf(" next_try=%" PRIi64, (int64_t)e->next_try);
            printf("\n");
        }
    }
}

#endif /* if 0 */