/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "name_distr.h"

#include <linux/pkt_sched.h>
struct tipc_stats {
        u32 sent_pkts;
        u32 recv_pkts;
        u32 sent_states;
        u32 recv_states;
        u32 sent_probes;
        u32 recv_probes;
        u32 sent_nacks;
        u32 recv_nacks;
        u32 sent_acks;
        u32 sent_bundled;
        u32 sent_bundles;
        u32 recv_bundled;
        u32 recv_bundles;
        u32 retransmitted;
        u32 sent_fragmented;
        u32 sent_fragments;
        u32 recv_fragmented;
        u32 recv_fragments;
        u32 link_congs;         /* # port sends blocked by congestion */
        u32 deferred_recv;
        u32 duplicates;
        u32 max_queue_sz;       /* send queue size high water mark */
        u32 accu_queue_sz;      /* used for send queue size profiling */
        u32 queue_sz_counts;    /* used for send queue size profiling */
        u32 msg_length_counts;  /* used for message length profiling */
        u32 msg_lengths_total;  /* used for message length profiling */
        u32 msg_length_profile[7]; /* used for msg. length profiling */
};
/**
 * struct tipc_link - TIPC link data structure
 * @addr: network address of link's peer node
 * @name: link name character string
 * @media_addr: media address to use when sending messages over link
 * @net: pointer to namespace struct
 * @refcnt: reference counter for permanent references (owner node & timer)
 * @peer_session: link session # being used by peer end of link
 * @peer_bearer_id: bearer id used by link's peer endpoint
 * @bearer_id: local bearer id used by link
 * @tolerance: minimum link continuity loss needed to reset link [in ms]
 * @abort_limit: # of unacknowledged continuity probes needed to reset link
 * @state: current state of link FSM
 * @peer_caps: bitmap describing capabilities of peer node
 * @silent_intv_cnt: # of timer intervals without any reception from peer
 * @proto_msg: template for control messages generated by link
 * @pmsg: convenience pointer to "proto_msg" field
 * @priority: current link priority
 * @net_plane: current link network plane ('A' through 'H')
 * @mon_state: cookie with information needed by link monitor
 * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
 * @exp_msg_count: # of tunnelled messages expected during link changeover
 * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
 * @mtu: current maximum packet size for this link
 * @advertised_mtu: advertised own mtu when link is being established
 * @transmitq: queue for sent, non-acked messages
 * @backlogq: queue for messages waiting to be sent
 * @snt_nxt: next sequence number to use for outbound messages
 * @prev_from: sequence number of most recent retransmission request
 * @stale_cnt: counter for number of identical retransmit attempts
 * @stale_limit: time when repeated identical retransmits must force link reset
 * @ackers: # of peers that need to ack each packet before it can be released
 * @acked: # last packet acked by a certain peer. Used for broadcast.
 * @rcv_nxt: next sequence number to expect for inbound messages
 * @deferred_queue: deferred queue saved OOS b'cast message received from node
 * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
 * @inputq: buffer queue for messages to be delivered upwards
 * @namedq: buffer queue for name table messages to be delivered upwards
 * @next_out: ptr to first unsent outbound message in queue
 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
 * @reasm_buf: head of partially reassembled inbound message fragments
 * @bc_rcvr: marks that this is a broadcast receiver link
 * @stats: collects statistics regarding link activity
 */
struct tipc_link {
        u32 addr;
        char name[TIPC_MAX_LINK_NAME];
        struct net *net;

        /* Management and link supervision data */
        u16 peer_session;
        u16 session;
        u16 snd_nxt_state;
        u16 rcv_nxt_state;
        u32 peer_bearer_id;
        u32 bearer_id;
        u32 tolerance;
        u32 abort_limit;
        u32 state;
        u16 peer_caps;
        bool in_session;
        bool active;
        u32 silent_intv_cnt;
        char if_name[TIPC_MAX_IF_NAME];
        u32 priority;
        char net_plane;
        struct tipc_mon_state mon_state;
        u16 rst_cnt;

        /* Failover/synch */
        u16 drop_point;
        struct sk_buff *failover_reasm_skb;
        struct sk_buff_head failover_deferdq;

        /* Max packet negotiation */
        u16 mtu;
        u16 advertised_mtu;

        /* Sending */
        struct sk_buff_head transmq;
        struct sk_buff_head backlogq;
        struct {
                u16 len;
                u16 limit;
        } backlog[5];
        u16 snd_nxt;
        u16 prev_from;
        u16 window;
        u16 stale_cnt;
        unsigned long stale_limit;

        /* Reception */
        u16 rcv_nxt;
        u32 rcv_unacked;
        struct sk_buff_head deferdq;
        struct sk_buff_head *inputq;
        struct sk_buff_head *namedq;

        /* Congestion handling */
        struct sk_buff_head wakeupq;

        /* Fragmentation/reassembly */
        struct sk_buff *reasm_buf;

        /* Broadcast */
        u16 ackers;
        u16 acked;
        struct tipc_link *bc_rcvlink;
        struct tipc_link *bc_sndlink;
        u8 nack_state;
        bool bc_peer_is_up;

        /* Statistics */
        struct tipc_stats stats;
};
/*
 * Error message prefixes
 */
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";
/* Send states for broadcast NACKs
 */
enum {
        BC_NACK_SND_CONDITIONAL,
        BC_NACK_SND_UNCONDITIONAL,
        BC_NACK_SND_SUPPRESS,
};

#define TIPC_BC_RETR_LIM  msecs_to_jiffies(10)   /* [ms] */
#define TIPC_UC_RETR_TIME (jiffies + msecs_to_jiffies(1))

/*
 * Interval between NACKs when packets arrive out of order
 */
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
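
/* Illustrative note (editor's sketch, not in the upstream file): the
 * intervals above pace control traffic. A broadcast packet is not
 * retransmitted again until TIPC_BC_RETR_LIM (~10 ms) has elapsed since its
 * previous copy was sent, unicast retransmits become eligible again on the
 * next jiffy via TIPC_UC_RETR_TIME, and TIPC_NACK_INTV spaces out the NACKs
 * generated when packets arrive out of order.
 */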
/* Link FSM states:
 */
enum {
        LINK_ESTABLISHED     = 0xe,
        LINK_ESTABLISHING    = 0xe  << 4,
        LINK_RESET           = 0x1  << 8,
        LINK_RESETTING       = 0x2  << 12,
        LINK_PEER_RESET      = 0xd  << 16,
        LINK_FAILINGOVER     = 0xf  << 20,
        LINK_SYNCHING        = 0xc  << 24
};
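
/* Illustrative note: each state above occupies a distinct bit range, so a
 * link state can be tested against several states with a single bitwise AND,
 * e.g. (sketch):
 *
 *      l->state & (LINK_ESTABLISHED | LINK_SYNCHING)   -- traffic may flow
 *      l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER)
 *
 * which is exactly how link_is_up() and tipc_link_is_blocked() below use it.
 */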
/* Link FSM state checking routines
 */
static int link_is_up(struct tipc_link *l)
{
        return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
                               struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
                                      bool probe_reply, u16 rcvgap,
                                      int tolerance, int priority,
                                      struct sk_buff_head *xmitq);
static void link_print(struct tipc_link *l, const char *str);
static int tipc_link_build_nack_msg(struct tipc_link *l,
                                    struct sk_buff_head *xmitq);
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
                                        struct sk_buff_head *xmitq);
static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);
static u16 tipc_build_gap_ack_blks(struct tipc_link *l, void *data);
static int tipc_link_advance_transmq(struct tipc_link *l, u16 acked, u16 gap,
                                     struct tipc_gap_ack_blks *ga,
                                     struct sk_buff_head *xmitq);
/*
 *  Simple non-static link routines (i.e. referenced outside this file)
 */
bool tipc_link_is_up(struct tipc_link *l)
{
        return link_is_up(l);
}

bool tipc_link_peer_is_down(struct tipc_link *l)
{
        return l->state == LINK_PEER_RESET;
}

bool tipc_link_is_reset(struct tipc_link *l)
{
        return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}

bool tipc_link_is_establishing(struct tipc_link *l)
{
        return l->state == LINK_ESTABLISHING;
}

bool tipc_link_is_synching(struct tipc_link *l)
{
        return l->state == LINK_SYNCHING;
}

bool tipc_link_is_failingover(struct tipc_link *l)
{
        return l->state == LINK_FAILINGOVER;
}

bool tipc_link_is_blocked(struct tipc_link *l)
{
        return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}

static bool link_is_bc_sndlink(struct tipc_link *l)
{
        return !l->bc_sndlink;
}

static bool link_is_bc_rcvlink(struct tipc_link *l)
{
        return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
}
void tipc_link_set_active(struct tipc_link *l, bool active)
{
        l->active = active;
}

u32 tipc_link_id(struct tipc_link *l)
{
        return l->peer_bearer_id << 16 | l->bearer_id;
}
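
/* Illustrative example: the link id packs both bearer identities into one
 * u32, peer_bearer_id in the upper and bearer_id in the lower 16 bits. With
 * peer_bearer_id = 1 and bearer_id = 0 the id is 0x00010000; the halves can
 * be recovered as (id >> 16) and (id & 0xffff).
 */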
int tipc_link_window(struct tipc_link *l)
{
        return l->window;
}

int tipc_link_prio(struct tipc_link *l)
{
        return l->priority;
}

unsigned long tipc_link_tolerance(struct tipc_link *l)
{
        return l->tolerance;
}

struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
{
        return l->inputq;
}

char tipc_link_plane(struct tipc_link *l)
{
        return l->net_plane;
}

void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
{
        l->peer_caps = capabilities;
}
void tipc_link_add_bc_peer(struct tipc_link *snd_l,
                           struct tipc_link *uc_l,
                           struct sk_buff_head *xmitq)
{
        struct tipc_link *rcv_l = uc_l->bc_rcvlink;

        snd_l->ackers++;
        rcv_l->acked = snd_l->snd_nxt - 1;
        snd_l->state = LINK_ESTABLISHED;
        tipc_link_build_bc_init_msg(uc_l, xmitq);
}

void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
                              struct tipc_link *rcv_l,
                              struct sk_buff_head *xmitq)
{
        u16 ack = snd_l->snd_nxt - 1;

        snd_l->ackers--;
        rcv_l->bc_peer_is_up = true;
        rcv_l->state = LINK_ESTABLISHED;
        tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
        trace_tipc_link_reset(rcv_l, TIPC_DUMP_ALL, "bclink removed!");
        tipc_link_reset(rcv_l);
        rcv_l->state = LINK_RESET;
        if (!snd_l->ackers) {
                trace_tipc_link_reset(snd_l, TIPC_DUMP_ALL, "zero ackers!");
                tipc_link_reset(snd_l);
                snd_l->state = LINK_RESET;
                __skb_queue_purge(xmitq);
        }
}
int tipc_link_bc_peers(struct tipc_link *l)
{
        return l->ackers;
}

static u16 link_bc_rcv_gap(struct tipc_link *l)
{
        struct sk_buff *skb = skb_peek(&l->deferdq);
        u16 gap = 0;

        if (more(l->snd_nxt, l->rcv_nxt))
                gap = l->snd_nxt - l->rcv_nxt;
        if (skb)
                gap = buf_seqno(skb) - l->rcv_nxt;
        return gap;
}
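
/* Illustrative note: sequence numbers are u16 and wrap at 65536, so distances
 * are computed in modulo-2^16 arithmetic (see the less()/more() helpers).
 * E.g. with rcv_nxt = 65530 and a deferred packet carrying seqno 4, the
 * subtraction above yields (u16)(4 - 65530) = 10, the true gap across the
 * wrap-around.
 */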
void tipc_link_set_mtu(struct tipc_link *l, int mtu)
{
        l->mtu = mtu;
}

int tipc_link_mtu(struct tipc_link *l)
{
        return l->mtu;
}

u16 tipc_link_rcv_nxt(struct tipc_link *l)
{
        return l->rcv_nxt;
}

u16 tipc_link_acked(struct tipc_link *l)
{
        return l->acked;
}

char *tipc_link_name(struct tipc_link *l)
{
        return l->name;
}

u32 tipc_link_state(struct tipc_link *l)
{
        return l->state;
}
/**
 * tipc_link_create - create a new link
 * @n: pointer to associated node
 * @if_name: associated interface name
 * @bearer_id: id (index) of associated bearer
 * @tolerance: link tolerance to be used by link
 * @net_plane: network plane (A, B, C...) this link belongs to
 * @mtu: mtu to be advertised by link
 * @priority: priority to be used by link
 * @window: send window to be used by link
 * @session: session to be used by link
 * @ownnode: identity of own node
 * @peer: node id of peer node
 * @peer_caps: bitmap describing peer node capabilities
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
                      int tolerance, char net_plane, u32 mtu, int priority,
                      int window, u32 session, u32 self,
                      u32 peer, u8 *peer_id, u16 peer_caps,
                      struct tipc_link *bc_sndlink,
                      struct tipc_link *bc_rcvlink,
                      struct sk_buff_head *inputq,
                      struct sk_buff_head *namedq,
                      struct tipc_link **link)
{
        char peer_str[NODE_ID_STR_LEN] = {0,};
        char self_str[NODE_ID_STR_LEN] = {0,};
        struct tipc_link *l;

        l = kzalloc(sizeof(*l), GFP_ATOMIC);
        if (!l)
                return false;
        *link = l;
        l->session = session;

        /* Set link name for unicast links only */
        if (peer_id) {
                tipc_nodeid2string(self_str, tipc_own_id(net));
                if (strlen(self_str) > 16)
                        sprintf(self_str, "%x", self);
                tipc_nodeid2string(peer_str, peer_id);
                if (strlen(peer_str) > 16)
                        sprintf(peer_str, "%x", peer);
        }
        /* Peer i/f name will be completed by reset/activate message */
        snprintf(l->name, sizeof(l->name), "%s:%s-%s:unknown",
                 self_str, if_name, peer_str);

        strcpy(l->if_name, if_name);
        l->addr = peer;
        l->peer_caps = peer_caps;
        l->net = net;
        l->in_session = false;
        l->bearer_id = bearer_id;
        l->tolerance = tolerance;
        if (bc_rcvlink)
                bc_rcvlink->tolerance = tolerance;
        l->net_plane = net_plane;
        l->advertised_mtu = mtu;
        l->mtu = mtu;
        l->priority = priority;
        tipc_link_set_queue_limits(l, window);
        l->ackers = 1;
        l->bc_sndlink = bc_sndlink;
        l->bc_rcvlink = bc_rcvlink;
        l->inputq = inputq;
        l->namedq = namedq;
        l->state = LINK_RESETTING;
        __skb_queue_head_init(&l->transmq);
        __skb_queue_head_init(&l->backlogq);
        __skb_queue_head_init(&l->deferdq);
        __skb_queue_head_init(&l->failover_deferdq);
        skb_queue_head_init(&l->wakeupq);
        skb_queue_head_init(l->inputq);
        return true;
}
/**
 * tipc_link_bc_create - create new link to be used for broadcast
 * @n: pointer to associated node
 * @mtu: mtu to be used initially if no peers
 * @window: send window to be used
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
                         int mtu, int window, u16 peer_caps,
                         struct sk_buff_head *inputq,
                         struct sk_buff_head *namedq,
                         struct tipc_link *bc_sndlink,
                         struct tipc_link **link)
{
        struct tipc_link *l;

        if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
                              0, ownnode, peer, NULL, peer_caps, bc_sndlink,
                              NULL, inputq, namedq, link))
                return false;

        l = *link;
        strcpy(l->name, tipc_bclink_name);
        trace_tipc_link_reset(l, TIPC_DUMP_ALL, "bclink created!");
        tipc_link_reset(l);
        l->state = LINK_RESET;
        l->ackers = 0;
        l->bc_rcvlink = l;

        /* Broadcast send link is always up */
        if (link_is_bc_sndlink(l))
                l->state = LINK_ESTABLISHED;

        /* Disable replicast if even a single peer doesn't support it */
        if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
                tipc_bcast_disable_rcast(net);

        return true;
}
/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 */
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
        int rc = 0;
        int old_state = l->state;

        switch (l->state) {
        case LINK_RESETTING:
                switch (evt) {
                case LINK_PEER_RESET_EVT:
                        l->state = LINK_PEER_RESET;
                        break;
                case LINK_RESET_EVT:
                        l->state = LINK_RESET;
                        break;
                case LINK_FAILURE_EVT:
                case LINK_FAILOVER_BEGIN_EVT:
                case LINK_ESTABLISH_EVT:
                case LINK_FAILOVER_END_EVT:
                case LINK_SYNCH_BEGIN_EVT:
                case LINK_SYNCH_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_RESET:
                switch (evt) {
                case LINK_PEER_RESET_EVT:
                        l->state = LINK_ESTABLISHING;
                        break;
                case LINK_FAILOVER_BEGIN_EVT:
                        l->state = LINK_FAILINGOVER;
                case LINK_FAILURE_EVT:
                case LINK_RESET_EVT:
                case LINK_ESTABLISH_EVT:
                case LINK_FAILOVER_END_EVT:
                        break;
                case LINK_SYNCH_BEGIN_EVT:
                case LINK_SYNCH_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_PEER_RESET:
                switch (evt) {
                case LINK_RESET_EVT:
                        l->state = LINK_ESTABLISHING;
                        break;
                case LINK_PEER_RESET_EVT:
                case LINK_ESTABLISH_EVT:
                case LINK_FAILURE_EVT:
                        break;
                case LINK_SYNCH_BEGIN_EVT:
                case LINK_SYNCH_END_EVT:
                case LINK_FAILOVER_BEGIN_EVT:
                case LINK_FAILOVER_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_FAILINGOVER:
                switch (evt) {
                case LINK_FAILOVER_END_EVT:
                        l->state = LINK_RESET;
                        break;
                case LINK_PEER_RESET_EVT:
                case LINK_RESET_EVT:
                case LINK_ESTABLISH_EVT:
                case LINK_FAILURE_EVT:
                        break;
                case LINK_FAILOVER_BEGIN_EVT:
                case LINK_SYNCH_BEGIN_EVT:
                case LINK_SYNCH_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_ESTABLISHING:
                switch (evt) {
                case LINK_ESTABLISH_EVT:
                        l->state = LINK_ESTABLISHED;
                        break;
                case LINK_FAILOVER_BEGIN_EVT:
                        l->state = LINK_FAILINGOVER;
                        break;
                case LINK_RESET_EVT:
                        l->state = LINK_RESET;
                        break;
                case LINK_FAILURE_EVT:
                case LINK_PEER_RESET_EVT:
                case LINK_SYNCH_BEGIN_EVT:
                case LINK_FAILOVER_END_EVT:
                        break;
                case LINK_SYNCH_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_ESTABLISHED:
                switch (evt) {
                case LINK_PEER_RESET_EVT:
                        l->state = LINK_PEER_RESET;
                        rc |= TIPC_LINK_DOWN_EVT;
                        break;
                case LINK_FAILURE_EVT:
                        l->state = LINK_RESETTING;
                        rc |= TIPC_LINK_DOWN_EVT;
                        break;
                case LINK_RESET_EVT:
                        l->state = LINK_RESET;
                        break;
                case LINK_ESTABLISH_EVT:
                case LINK_SYNCH_END_EVT:
                        break;
                case LINK_SYNCH_BEGIN_EVT:
                        l->state = LINK_SYNCHING;
                        break;
                case LINK_FAILOVER_BEGIN_EVT:
                case LINK_FAILOVER_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_SYNCHING:
                switch (evt) {
                case LINK_PEER_RESET_EVT:
                        l->state = LINK_PEER_RESET;
                        rc |= TIPC_LINK_DOWN_EVT;
                        break;
                case LINK_FAILURE_EVT:
                        l->state = LINK_RESETTING;
                        rc |= TIPC_LINK_DOWN_EVT;
                        break;
                case LINK_RESET_EVT:
                        l->state = LINK_RESET;
                        break;
                case LINK_ESTABLISH_EVT:
                case LINK_SYNCH_BEGIN_EVT:
                        break;
                case LINK_SYNCH_END_EVT:
                        l->state = LINK_ESTABLISHED;
                        break;
                case LINK_FAILOVER_BEGIN_EVT:
                case LINK_FAILOVER_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        default:
                pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
        }
        trace_tipc_link_fsm(l->name, old_state, l->state, evt);
        return rc;
illegal_evt:
        pr_err("Illegal FSM event %x in state %x on link %s\n",
               evt, l->state, l->name);
        trace_tipc_link_fsm(l->name, old_state, l->state, evt);
        return rc;
}
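
/* Illustrative walk-through of the FSM above for a normal link bring-up,
 * assuming no failover or synch events intervene:
 *
 *      LINK_RESET        --LINK_PEER_RESET_EVT--> LINK_ESTABLISHING
 *      LINK_ESTABLISHING --LINK_ESTABLISH_EVT---> LINK_ESTABLISHED
 *      LINK_ESTABLISHED  --LINK_FAILURE_EVT-----> LINK_RESETTING
 *                          (and rc |= TIPC_LINK_DOWN_EVT tells the caller)
 */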
/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
        struct sk_buff *skb;
        struct tipc_msg *msg;
        int length;

        /* Update counters used in statistical profiling of send traffic */
        l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
        l->stats.queue_sz_counts++;

        skb = skb_peek(&l->transmq);
        if (!skb)
                return;
        msg = buf_msg(skb);
        length = msg_size(msg);

        if (msg_user(msg) == MSG_FRAGMENTER) {
                if (msg_type(msg) != FIRST_FRAGMENT)
                        return;
                length = msg_size(msg_get_wrapped(msg));
        }
        l->stats.msg_lengths_total += length;
        l->stats.msg_length_counts++;
        if (length <= 64)
                l->stats.msg_length_profile[0]++;
        else if (length <= 256)
                l->stats.msg_length_profile[1]++;
        else if (length <= 1024)
                l->stats.msg_length_profile[2]++;
        else if (length <= 4096)
                l->stats.msg_length_profile[3]++;
        else if (length <= 16384)
                l->stats.msg_length_profile[4]++;
        else if (length <= 32768)
                l->stats.msg_length_profile[5]++;
        else
                l->stats.msg_length_profile[6]++;
}
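
/* Illustrative mapping of message length to profile bucket, as implemented
 * above:
 *
 *      bucket:  [0]   [1]    [2]     [3]     [4]      [5]      [6]
 *      length:  <=64  <=256  <=1024  <=4096  <=16384  <=32768  larger
 *
 * A 1200-byte message, for example, lands in msg_length_profile[3].
 */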
/* tipc_link_too_silent - check if link is "too silent"
 * @l: tipc link to be checked
 *
 * Returns true if the link 'silent_intv_cnt' is about to reach the
 * 'abort_limit' value, otherwise false
 */
bool tipc_link_too_silent(struct tipc_link *l)
{
        return (l->silent_intv_cnt + 2 > l->abort_limit);
}
/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
        int mtyp = 0;
        int rc = 0;
        bool state = false;
        bool probe = false;
        bool setup = false;
        u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
        u16 bc_acked = l->bc_rcvlink->acked;
        struct tipc_mon_state *mstate = &l->mon_state;

        trace_tipc_link_timeout(l, TIPC_DUMP_NONE, " ");
        trace_tipc_link_too_silent(l, TIPC_DUMP_ALL, " ");
        switch (l->state) {
        case LINK_ESTABLISHED:
        case LINK_SYNCHING:
                mtyp = STATE_MSG;
                link_profile_stats(l);
                tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
                if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
                        return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
                state = bc_acked != bc_snt;
                state |= l->bc_rcvlink->rcv_unacked;
                state |= l->rcv_unacked;
                state |= !skb_queue_empty(&l->transmq);
                state |= !skb_queue_empty(&l->deferdq);
                probe = mstate->probing;
                probe |= l->silent_intv_cnt;
                if (probe || mstate->monitoring)
                        l->silent_intv_cnt++;
                break;
        case LINK_RESET:
                setup = l->rst_cnt++ <= 4;
                setup |= !(l->rst_cnt % 16);
                mtyp = RESET_MSG;
                break;
        case LINK_ESTABLISHING:
                setup = true;
                mtyp = ACTIVATE_MSG;
                break;
        case LINK_PEER_RESET:
        case LINK_RESETTING:
        case LINK_FAILINGOVER:
                break;
        default:
                break;
        }

        if (state || probe || setup)
                tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq);

        return rc;
}
/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @l: congested link
 * @hdr: header of message that is being sent
 * Create pseudo msg to send back to user when congestion abates
 */
static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
{
        u32 dnode = tipc_own_addr(l->net);
        u32 dport = msg_origport(hdr);
        struct sk_buff *skb;

        /* Create and schedule wakeup pseudo message */
        skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
                              dnode, l->addr, dport, 0, 0);
        if (!skb)
                return -ENOBUFS;
        msg_set_dest_droppable(buf_msg(skb), true);
        TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
        skb_queue_tail(&l->wakeupq, skb);
        l->stats.link_congs++;
        trace_tipc_link_conges(l, TIPC_DUMP_ALL, "wakeup scheduled!");
        return -ELINKCONG;
}
/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @l: congested link
 * Wake up a number of waiting users, as permitted by available space
 * in the send queue
 */
static void link_prepare_wakeup(struct tipc_link *l)
{
        struct sk_buff *skb, *tmp;
        int imp, i = 0;

        skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
                imp = TIPC_SKB_CB(skb)->chain_imp;
                if (l->backlog[imp].len < l->backlog[imp].limit) {
                        skb_unlink(skb, &l->wakeupq);
                        skb_queue_tail(l->inputq, skb);
                } else if (i++ > 10) {
                        break;
                }
        }
}
void tipc_link_reset(struct tipc_link *l)
{
        struct sk_buff_head list;

        __skb_queue_head_init(&list);

        l->in_session = false;
        /* Force re-synch of peer session number before establishing */
        l->peer_session--;
        l->session++;
        l->mtu = l->advertised_mtu;

        spin_lock_bh(&l->wakeupq.lock);
        skb_queue_splice_init(&l->wakeupq, &list);
        spin_unlock_bh(&l->wakeupq.lock);

        spin_lock_bh(&l->inputq->lock);
        skb_queue_splice_init(&list, l->inputq);
        spin_unlock_bh(&l->inputq->lock);

        __skb_queue_purge(&l->transmq);
        __skb_queue_purge(&l->deferdq);
        __skb_queue_purge(&l->backlogq);
        __skb_queue_purge(&l->failover_deferdq);
        l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
        l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
        l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
        l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
        l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
        kfree_skb(l->reasm_buf);
        kfree_skb(l->failover_reasm_skb);
        l->reasm_buf = NULL;
        l->failover_reasm_skb = NULL;
        l->rcv_unacked = 0;
        l->snd_nxt = 1;
        l->rcv_nxt = 1;
        l->snd_nxt_state = 1;
        l->rcv_nxt_state = 1;
        l->acked = 0;
        l->silent_intv_cnt = 0;
        l->rst_cnt = 0;
        l->stale_cnt = 0;
        l->bc_peer_is_up = false;
        memset(&l->mon_state, 0, sizeof(l->mon_state));
        tipc_link_reset_stats(l);
}
/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @l: link to use
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
                   struct sk_buff_head *xmitq)
{
        struct tipc_msg *hdr = buf_msg(skb_peek(list));
        unsigned int maxwin = l->window;
        int imp = msg_importance(hdr);
        unsigned int mtu = l->mtu;
        u16 ack = l->rcv_nxt - 1;
        u16 seqno = l->snd_nxt;
        u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
        struct sk_buff_head *transmq = &l->transmq;
        struct sk_buff_head *backlogq = &l->backlogq;
        struct sk_buff *skb, *_skb, *bskb;
        int pkt_cnt = skb_queue_len(list);
        int rc = 0;

        if (unlikely(msg_size(hdr) > mtu)) {
                skb_queue_purge(list);
                return -EMSGSIZE;
        }

        /* Allow oversubscription of one data msg per source at congestion */
        if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
                if (imp == TIPC_SYSTEM_IMPORTANCE) {
                        pr_warn("%s<%s>, link overflow", link_rst_msg, l->name);
                        return -ENOBUFS;
                }
                rc = link_schedule_user(l, hdr);
        }

        if (pkt_cnt > 1) {
                l->stats.sent_fragmented++;
                l->stats.sent_fragments += pkt_cnt;
        }

        /* Prepare each packet for sending, and add to relevant queue: */
        while (skb_queue_len(list)) {
                skb = skb_peek(list);
                hdr = buf_msg(skb);
                msg_set_seqno(hdr, seqno);
                msg_set_ack(hdr, ack);
                msg_set_bcast_ack(hdr, bc_ack);

                if (likely(skb_queue_len(transmq) < maxwin)) {
                        _skb = skb_clone(skb, GFP_ATOMIC);
                        if (!_skb) {
                                skb_queue_purge(list);
                                return -ENOBUFS;
                        }
                        __skb_dequeue(list);
                        __skb_queue_tail(transmq, skb);
                        /* next retransmit attempt */
                        if (link_is_bc_sndlink(l))
                                TIPC_SKB_CB(skb)->nxt_retr =
                                        jiffies + TIPC_BC_RETR_LIM;
                        __skb_queue_tail(xmitq, _skb);
                        TIPC_SKB_CB(skb)->ackers = l->ackers;
                        l->rcv_unacked = 0;
                        l->stats.sent_pkts++;
                        seqno++;
                        continue;
                }
                if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
                        kfree_skb(__skb_dequeue(list));
                        l->stats.sent_bundled++;
                        continue;
                }
                if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
                        kfree_skb(__skb_dequeue(list));
                        __skb_queue_tail(backlogq, bskb);
                        l->backlog[msg_importance(buf_msg(bskb))].len++;
                        l->stats.sent_bundled++;
                        l->stats.sent_bundles++;
                        continue;
                }
                break;
        }
        l->backlog[imp].len += skb_queue_len(list);
        skb_queue_splice_tail_init(list, backlogq);
        l->snd_nxt = seqno;
        return rc;
}
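
/* Illustrative summary of the queue discipline above (editor's sketch, not
 * part of the upstream file): while fewer than 'window' packets are in
 * flight, a packet goes straight to transmq with a clone on xmitq for the
 * bearer. Once the window is full, packets are bundled into the tail of
 * backlogq when they fit, and only spliced there unchanged as a last resort;
 * tipc_link_advance_backlog() below later moves backlogq packets into
 * transmq as acknowledgments reopen the window.
 */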
static void tipc_link_advance_backlog(struct tipc_link *l,
                                      struct sk_buff_head *xmitq)
{
        struct sk_buff *skb, *_skb;
        struct tipc_msg *hdr;
        u16 seqno = l->snd_nxt;
        u16 ack = l->rcv_nxt - 1;
        u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;

        while (skb_queue_len(&l->transmq) < l->window) {
                skb = skb_peek(&l->backlogq);
                if (!skb)
                        break;
                _skb = skb_clone(skb, GFP_ATOMIC);
                if (!_skb)
                        break;
                __skb_dequeue(&l->backlogq);
                hdr = buf_msg(skb);
                l->backlog[msg_importance(hdr)].len--;
                __skb_queue_tail(&l->transmq, skb);
                /* next retransmit attempt */
                if (link_is_bc_sndlink(l))
                        TIPC_SKB_CB(skb)->nxt_retr = jiffies + TIPC_BC_RETR_LIM;

                __skb_queue_tail(xmitq, _skb);
                TIPC_SKB_CB(skb)->ackers = l->ackers;
                msg_set_seqno(hdr, seqno);
                msg_set_ack(hdr, ack);
                msg_set_bcast_ack(hdr, bc_ack);
                l->rcv_unacked = 0;
                l->stats.sent_pkts++;
                seqno++;
        }
        l->snd_nxt = seqno;
}
/**
 * link_retransmit_failure() - Detect repeated retransmit failures
 * @l: tipc link sender
 * @r: tipc link receiver (= l in case of unicast)
 * @from: seqno of the 1st packet in retransmit request
 * @rc: returned code
 *
 * Return: true if the repeated retransmit failure happens, otherwise
 * false
 */
static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r,
                                    u16 from, int *rc)
{
        struct sk_buff *skb = skb_peek(&l->transmq);
        struct tipc_msg *hdr;

        if (!skb)
                return false;
        hdr = buf_msg(skb);

        /* Detect repeated retransmit failures on same packet */
        if (r->prev_from != from) {
                r->prev_from = from;
                r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance);
                r->stale_cnt = 0;
        } else if (++r->stale_cnt > 99 && time_after(jiffies, r->stale_limit)) {
                pr_warn("Retransmission failure on link <%s>\n", l->name);
                link_print(l, "State of link ");
                pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
                        msg_user(hdr), msg_type(hdr), msg_size(hdr),
                        msg_errcode(hdr));
                pr_info("sqno %u, prev: %x, src: %x\n",
                        msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));

                trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
                trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
                trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");

                if (link_is_bc_sndlink(l))
                        *rc = TIPC_LINK_DOWN_EVT;
                else
                        *rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
                return true;
        }

        return false;
}
/* tipc_link_bc_retrans() - retransmit zero or more packets
 * @l: the link to transmit on
 * @r: the receiving link ordering the retransmit. Same as l if unicast
 * @from: retransmit from (inclusive) this sequence number
 * @to: retransmit to (inclusive) this sequence number
 * @xmitq: queue for accumulating the retransmitted packets
 */
static int tipc_link_bc_retrans(struct tipc_link *l, struct tipc_link *r,
                                u16 from, u16 to, struct sk_buff_head *xmitq)
{
        struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
        u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
        u16 ack = l->rcv_nxt - 1;
        struct tipc_msg *hdr;
        int rc = 0;

        if (!skb)
                return 0;
        if (less(to, from))
                return 0;

        trace_tipc_link_retrans(r, from, to, &l->transmq);

        if (link_retransmit_failure(l, r, from, &rc))
                return rc;

        skb_queue_walk(&l->transmq, skb) {
                hdr = buf_msg(skb);
                if (less(msg_seqno(hdr), from))
                        continue;
                if (more(msg_seqno(hdr), to))
                        break;
                if (link_is_bc_sndlink(l)) {
                        if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
                                continue;
                        TIPC_SKB_CB(skb)->nxt_retr = jiffies + TIPC_BC_RETR_LIM;
                }
                _skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
                if (!_skb)
                        return 0;
                hdr = buf_msg(_skb);
                msg_set_ack(hdr, ack);
                msg_set_bcast_ack(hdr, bc_ack);
                _skb->priority = TC_PRIO_CONTROL;
                __skb_queue_tail(xmitq, _skb);
                l->stats.retransmitted++;
        }

        return 0;
}
/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
                            struct sk_buff_head *inputq)
{
        struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq;
        struct tipc_msg *hdr = buf_msg(skb);

        switch (msg_user(hdr)) {
        case TIPC_LOW_IMPORTANCE:
        case TIPC_MEDIUM_IMPORTANCE:
        case TIPC_HIGH_IMPORTANCE:
        case TIPC_CRITICAL_IMPORTANCE:
                if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) {
                        skb_queue_tail(mc_inputq, skb);
                        return true;
                }
                /* fall through */
        case CONN_MANAGER:
                skb_queue_tail(inputq, skb);
                return true;
        case GROUP_PROTOCOL:
                skb_queue_tail(mc_inputq, skb);
                return true;
        case NAME_DISTRIBUTOR:
                l->bc_rcvlink->state = LINK_ESTABLISHED;
                skb_queue_tail(l->namedq, skb);
                return true;
        case MSG_BUNDLER:
        case TUNNEL_PROTOCOL:
        case MSG_FRAGMENTER:
        case BCAST_PROTOCOL:
                return false;
        default:
                pr_warn("Dropping received illegal msg type\n");
                kfree_skb(skb);
                return true;
        }
}
/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 */
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
                           struct sk_buff_head *inputq,
                           struct sk_buff **reasm_skb)
{
        struct tipc_msg *hdr = buf_msg(skb);
        struct sk_buff *iskb;
        struct sk_buff_head tmpq;
        int usr = msg_user(hdr);
        int pos = 0;

        if (usr == MSG_BUNDLER) {
                skb_queue_head_init(&tmpq);
                l->stats.recv_bundles++;
                l->stats.recv_bundled += msg_msgcnt(hdr);
                while (tipc_msg_extract(skb, &iskb, &pos))
                        tipc_data_input(l, iskb, &tmpq);
                tipc_skb_queue_splice_tail(&tmpq, inputq);
                return 0;
        } else if (usr == MSG_FRAGMENTER) {
                l->stats.recv_fragments++;
                if (tipc_buf_append(reasm_skb, &skb)) {
                        l->stats.recv_fragmented++;
                        tipc_data_input(l, skb, inputq);
                } else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
                        pr_warn_ratelimited("Unable to build fragment list\n");
                        return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
                }
                return 0;
        } else if (usr == BCAST_PROTOCOL) {
                tipc_bcast_lock(l->net);
                tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
                tipc_bcast_unlock(l->net);
        }

        kfree_skb(skb);
        return 0;
}
/* tipc_link_tnl_rcv() - receive TUNNEL_PROTOCOL message, drop or process the
 *                       inner message along with the ones in the old link's
 *                       deferdq
 * @l: tunnel link
 * @skb: TUNNEL_PROTOCOL message
 * @inputq: queue to put messages ready for delivery
 */
static int tipc_link_tnl_rcv(struct tipc_link *l, struct sk_buff *skb,
                             struct sk_buff_head *inputq)
{
        struct sk_buff **reasm_skb = &l->failover_reasm_skb;
        struct sk_buff_head *fdefq = &l->failover_deferdq;
        struct tipc_msg *hdr = buf_msg(skb);
        struct sk_buff *iskb;
        int ipos = 0;
        int rc = 0;
        u16 seqno;

        /* SYNCH_MSG from current link, drop it */
        if (msg_type(hdr) == SYNCH_MSG) {
                kfree_skb(skb);
                return 0;
        }

        /* FAILOVER_MSG from the old link, extract the inner message */
        if (!tipc_msg_extract(skb, &iskb, &ipos)) {
                pr_warn_ratelimited("Cannot extract FAILOVER_MSG, defq: %d\n",
                                    skb_queue_len(fdefq));
                return rc;
        }

        do {
                seqno = buf_seqno(iskb);

                if (unlikely(less(seqno, l->drop_point))) {
                        kfree_skb(iskb);
                        continue;
                }

                if (unlikely(seqno != l->drop_point)) {
                        __tipc_skb_queue_sorted(fdefq, seqno, iskb);
                        continue;
                }

                l->drop_point++;

                if (!tipc_data_input(l, iskb, inputq))
                        rc |= tipc_link_input(l, iskb, inputq, reasm_skb);
                if (unlikely(rc))
                        break;
        } while ((iskb = __tipc_skb_dequeue(fdefq, l->drop_point)));

        return rc;
}
static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
{
        bool released = false;
        struct sk_buff *skb, *tmp;

        skb_queue_walk_safe(&l->transmq, skb, tmp) {
                if (more(buf_seqno(skb), acked))
                        break;
                __skb_unlink(skb, &l->transmq);
                kfree_skb(skb);
                released = true;
        }
        return released;
}
/* tipc_build_gap_ack_blks - build Gap ACK blocks
 * @l: tipc link that data have come with gaps in sequence if any
 * @data: data buffer to store the Gap ACK blocks after built
 *
 * returns the actual allocated memory size
 */
static u16 tipc_build_gap_ack_blks(struct tipc_link *l, void *data)
{
        struct sk_buff *skb = skb_peek(&l->deferdq);
        struct tipc_gap_ack_blks *ga = data;
        u16 len, expect, seqno = 0;
        u8 n = 0;

        if (!skb)
                goto exit;

        expect = buf_seqno(skb);
        skb_queue_walk(&l->deferdq, skb) {
                seqno = buf_seqno(skb);
                if (unlikely(more(seqno, expect))) {
                        ga->gacks[n].ack = htons(expect - 1);
                        ga->gacks[n].gap = htons(seqno - expect);
                        if (++n >= MAX_GAP_ACK_BLKS) {
                                pr_info_ratelimited("Too few Gap ACK blocks!\n");
                                goto exit;
                        }
                } else if (unlikely(less(seqno, expect))) {
                        pr_warn("Unexpected skb in deferdq!\n");
                        continue;
                }
                expect = seqno + 1;
        }

        /* last block */
        ga->gacks[n].ack = htons(seqno);
        ga->gacks[n].gap = 0;
        n++;

exit:
        ga->gack_cnt = n;
        len = tipc_gap_ack_blks_sz(n);
        ga->len = htons(len);
        return len;
}
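
/* Illustrative example of the encoding above: with deferdq holding seqnos
 * {12, 13, 16}, the walk starts at expect = 12 and emits
 *
 *      gacks[0] = { .ack = htons(13), .gap = htons(2) }  (14..15 missing)
 *      gacks[1] = { .ack = htons(16), .gap = 0 }         (closing block)
 *
 * and returns tipc_gap_ack_blks_sz(2). The leading hole between rcv_nxt and
 * the first deferred packet is not encoded here; it travels separately as
 * the seq_gap field of the STATE message (see tipc_link_build_proto_msg()).
 */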
/* tipc_link_advance_transmq - advance TIPC link transmq queue by releasing
 *                             acked packets, also doing retransmissions if
 *                             gaps found
 * @l: tipc link with transmq queue to be advanced
 * @acked: seqno of last packet acked by peer without any gaps before
 * @gap: # of gap packets
 * @ga: buffer pointer to Gap ACK blocks from peer
 * @xmitq: queue for accumulating the retransmitted packets if any
 *
 * In case of a repeated retransmit failures, the call will return shortly
 * with a returned code (e.g. TIPC_LINK_DOWN_EVT)
 */
static int tipc_link_advance_transmq(struct tipc_link *l, u16 acked, u16 gap,
                                     struct tipc_gap_ack_blks *ga,
                                     struct sk_buff_head *xmitq)
{
        struct sk_buff *skb, *_skb, *tmp;
        struct tipc_msg *hdr;
        u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
        u16 ack = l->rcv_nxt - 1;
        u16 seqno, n = 0;
        int rc = 0;

        if (gap && link_retransmit_failure(l, l, acked + 1, &rc))
                return rc;

        skb_queue_walk_safe(&l->transmq, skb, tmp) {
                seqno = buf_seqno(skb);

next_gap_ack:
                if (less_eq(seqno, acked)) {
                        /* release skb */
                        __skb_unlink(skb, &l->transmq);
                        kfree_skb(skb);
                } else if (less_eq(seqno, acked + gap)) {
                        /* retransmit skb */
                        if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
                                continue;
                        TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;

                        _skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
                        if (!_skb)
                                continue;
                        hdr = buf_msg(_skb);
                        msg_set_ack(hdr, ack);
                        msg_set_bcast_ack(hdr, bc_ack);
                        _skb->priority = TC_PRIO_CONTROL;
                        __skb_queue_tail(xmitq, _skb);
                        l->stats.retransmitted++;
                } else {
                        /* retry with Gap ACK blocks if any */
                        if (!ga || n >= ga->gack_cnt)
                                break;
                        acked = ntohs(ga->gacks[n].ack);
                        gap = ntohs(ga->gacks[n].gap);
                        n++;
                        goto next_gap_ack;
                }
        }

        return 0;
}
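
/* Illustrative walk-through of the loop above, assuming acked = 20, gap = 2
 * and one Gap ACK block { .ack = 25, .gap = 1 }: packets up to 20 are
 * released, 21..22 are retransmitted (subject to the nxt_retr pacing check),
 * then the block is consumed so packets up to 25 are released, 26 is
 * retransmitted, and the walk stops once the blocks are exhausted.
 */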
/* tipc_link_build_state_msg: prepare link state message for transmission
 *
 * Note that sending of broadcast ack is coordinated among nodes, to reduce
 * the risk of ack storms towards the sender
 */
int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
        if (!l)
                return 0;

        /* Broadcast ACK must be sent via a unicast link => defer to caller */
        if (link_is_bc_rcvlink(l)) {
                if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
                        return 0;
                l->rcv_unacked = 0;

                /* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
                l->snd_nxt = l->rcv_nxt;
                return TIPC_LINK_SND_STATE;
        }

        /* Unicast ACK */
        l->rcv_unacked = 0;
        l->stats.sent_acks++;
        tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
        return 0;
}
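
/* Illustrative note on the xor test above: a broadcast receiver only emits
 * its ack when the low four bits of rcv_nxt XOR the own node address equal
 * 0xf, so different nodes ack at different points of the sequence space.
 * E.g. a node whose address ends in 0x3 acks when rcv_nxt ends in 0xc, one
 * ending in 0x5 when rcv_nxt ends in 0xa: at most one ack per 16 packets per
 * node, avoiding a storm of simultaneous acks towards the sender.
 */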
/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
 */
void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
        int mtyp = RESET_MSG;
        struct sk_buff *skb;

        if (l->state == LINK_ESTABLISHING)
                mtyp = ACTIVATE_MSG;

        tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq);

        /* Inform peer that this endpoint is going down if applicable */
        skb = skb_peek_tail(xmitq);
        if (skb && (l->state == LINK_RESET))
                msg_set_peer_stopping(buf_msg(skb), 1);
}
/* tipc_link_build_nack_msg: prepare link nack message for transmission
 * Note that sending of broadcast NACK is coordinated among nodes, to
 * reduce the risk of NACK storms towards the sender
 */
static int tipc_link_build_nack_msg(struct tipc_link *l,
                                    struct sk_buff_head *xmitq)
{
        u32 def_cnt = ++l->stats.deferred_recv;
        u32 defq_len = skb_queue_len(&l->deferdq);
        int match1, match2;

        if (link_is_bc_rcvlink(l)) {
                match1 = def_cnt & 0xf;
                match2 = tipc_own_addr(l->net) & 0xf;
                if (match1 == match2)
                        return TIPC_LINK_SND_STATE;
                return 0;
        }

        if (defq_len >= 3 && !((defq_len - 3) % 16))
                tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
        return 0;
}
/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
 * @l: the link that should handle the message
 * @skb: TIPC packet
 * @xmitq: queue to place packets to be sent after this call
 */
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
                  struct sk_buff_head *xmitq)
{
        struct sk_buff_head *defq = &l->deferdq;
        struct tipc_msg *hdr = buf_msg(skb);
        u16 seqno, rcv_nxt, win_lim;
        int rc = 0;

        /* Verify and update link state */
        if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
                return tipc_link_proto_rcv(l, skb, xmitq);

        /* Don't send probe at next timeout expiration */
        l->silent_intv_cnt = 0;

        do {
                hdr = buf_msg(skb);
                seqno = msg_seqno(hdr);
                rcv_nxt = l->rcv_nxt;
                win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;

                if (unlikely(!link_is_up(l))) {
                        if (l->state == LINK_ESTABLISHING)
                                rc = TIPC_LINK_UP_EVT;
                        goto drop;
                }

                /* Drop if outside receive window */
                if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
                        l->stats.duplicates++;
                        goto drop;
                }

                /* Forward queues and wake up waiting users */
                if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
                        l->stale_cnt = 0;
                        tipc_link_advance_backlog(l, xmitq);
                        if (unlikely(!skb_queue_empty(&l->wakeupq)))
                                link_prepare_wakeup(l);
                }

                /* Defer delivery if sequence gap */
                if (unlikely(seqno != rcv_nxt)) {
                        __tipc_skb_queue_sorted(defq, seqno, skb);
                        rc |= tipc_link_build_nack_msg(l, xmitq);
                        break;
                }

                /* Deliver packet */
                l->rcv_nxt++;
                l->stats.recv_pkts++;

                if (unlikely(msg_user(hdr) == TUNNEL_PROTOCOL))
                        rc |= tipc_link_tnl_rcv(l, skb, l->inputq);
                else if (!tipc_data_input(l, skb, l->inputq))
                        rc |= tipc_link_input(l, skb, l->inputq, &l->reasm_buf);
                if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
                        rc |= tipc_link_build_state_msg(l, xmitq);
                if (unlikely(rc & ~TIPC_LINK_SND_STATE))
                        break;
        } while ((skb = __tipc_skb_dequeue(defq, l->rcv_nxt)));

        return rc;
drop:
        kfree_skb(skb);
        return rc;
}
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
                                      bool probe_reply, u16 rcvgap,
                                      int tolerance, int priority,
                                      struct sk_buff_head *xmitq)
{
        struct tipc_link *bcl = l->bc_rcvlink;
        struct sk_buff *skb;
        struct tipc_msg *hdr;
        struct sk_buff_head *dfq = &l->deferdq;
        bool node_up = link_is_up(bcl);
        struct tipc_mon_state *mstate = &l->mon_state;
        int dlen = 0;
        void *data;
        u16 glen = 0;

        /* Don't send protocol message during reset or link failover */
        if (tipc_link_is_blocked(l))
                return;

        if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
                return;

        if (!skb_queue_empty(dfq))
                rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;

        skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
                              tipc_max_domain_size + MAX_GAP_ACK_BLKS_SZ,
                              l->addr, tipc_own_addr(l->net), 0, 0, 0);
        if (!skb)
                return;

        hdr = buf_msg(skb);
        data = msg_data(hdr);
        msg_set_session(hdr, l->session);
        msg_set_bearer_id(hdr, l->bearer_id);
        msg_set_net_plane(hdr, l->net_plane);
        msg_set_next_sent(hdr, l->snd_nxt);
        msg_set_ack(hdr, l->rcv_nxt - 1);
        msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
        msg_set_bc_ack_invalid(hdr, !node_up);
        msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
        msg_set_link_tolerance(hdr, tolerance);
        msg_set_linkprio(hdr, priority);
        msg_set_redundant_link(hdr, node_up);
        msg_set_seq_gap(hdr, 0);
        msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);

        if (mtyp == STATE_MSG) {
                if (l->peer_caps & TIPC_LINK_PROTO_SEQNO)
                        msg_set_seqno(hdr, l->snd_nxt_state++);
                msg_set_seq_gap(hdr, rcvgap);
                msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
                msg_set_probe(hdr, probe);
                msg_set_is_keepalive(hdr, probe || probe_reply);
                if (l->peer_caps & TIPC_GAP_ACK_BLOCK)
                        glen = tipc_build_gap_ack_blks(l, data);
                tipc_mon_prep(l->net, data + glen, &dlen, mstate, l->bearer_id);
                msg_set_size(hdr, INT_H_SIZE + glen + dlen);
                skb_trim(skb, INT_H_SIZE + glen + dlen);
                l->stats.sent_states++;
                l->rcv_unacked = 0;
        } else {
                /* RESET_MSG or ACTIVATE_MSG */
                if (mtyp == ACTIVATE_MSG) {
                        msg_set_dest_session_valid(hdr, 1);
                        msg_set_dest_session(hdr, l->peer_session);
                }
                msg_set_max_pkt(hdr, l->advertised_mtu);
                strcpy(data, l->if_name);
                msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
                skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
        }
        if (probe)
                l->stats.sent_probes++;
        if (rcvgap)
                l->stats.sent_nacks++;
        skb->priority = TC_PRIO_CONTROL;
        __skb_queue_tail(xmitq, skb);
        trace_tipc_proto_build(skb, false, l->name);
}
void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
                                    struct sk_buff_head *xmitq)
{
        u32 onode = tipc_own_addr(l->net);
        struct tipc_msg *hdr, *ihdr;
        struct sk_buff_head tnlq;
        struct sk_buff *skb;
        u32 dnode = l->addr;

        skb_queue_head_init(&tnlq);
        skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
                              INT_H_SIZE, BASIC_H_SIZE,
                              dnode, onode, 0, 0, 0);
        if (!skb) {
                pr_warn("%sunable to create tunnel packet\n", link_co_err);
                return;
        }

        hdr = buf_msg(skb);
        msg_set_msgcnt(hdr, 1);
        msg_set_bearer_id(hdr, l->peer_bearer_id);

        ihdr = (struct tipc_msg *)msg_data(hdr);
        tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
                      BASIC_H_SIZE, dnode);
        msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
        __skb_queue_tail(&tnlq, skb);
        tipc_link_xmit(l, &tnlq, xmitq);
}
/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
 * with contents of the link's transmit and backlog queues.
 */
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
                           int mtyp, struct sk_buff_head *xmitq)
{
        struct sk_buff_head *fdefq = &tnl->failover_deferdq;
        struct sk_buff *skb, *tnlskb;
        struct tipc_msg *hdr, tnlhdr;
        struct sk_buff_head *queue = &l->transmq;
        struct sk_buff_head tmpxq, tnlq;
        u16 pktlen, pktcnt, seqno = l->snd_nxt;

        if (!tnl)
                return;

        skb_queue_head_init(&tnlq);
        skb_queue_head_init(&tmpxq);

        /* At least one packet required for safe algorithm => add dummy */
        skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
                              BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
                              0, 0, TIPC_ERR_NO_PORT);
        if (!skb) {
                pr_warn("%sunable to create tunnel packet\n", link_co_err);
                return;
        }
        skb_queue_tail(&tnlq, skb);
        tipc_link_xmit(l, &tnlq, &tmpxq);
        __skb_queue_purge(&tmpxq);

        /* Initialize reusable tunnel packet header */
        tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
                      mtyp, INT_H_SIZE, l->addr);
        if (mtyp == SYNCH_MSG)
                pktcnt = l->snd_nxt - buf_seqno(skb_peek(&l->transmq));
        else
                pktcnt = skb_queue_len(&l->transmq);
        pktcnt += skb_queue_len(&l->backlogq);
        msg_set_msgcnt(&tnlhdr, pktcnt);
        msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
tnl:
        /* Wrap each packet into a tunnel packet */
        skb_queue_walk(queue, skb) {
                hdr = buf_msg(skb);
                if (queue == &l->backlogq)
                        msg_set_seqno(hdr, seqno++);
                pktlen = msg_size(hdr);
                msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
                tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
                if (!tnlskb) {
                        pr_warn("%sunable to send packet\n", link_co_err);
                        return;
                }
                skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
                skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
                __skb_queue_tail(&tnlq, tnlskb);
        }
        if (queue != &l->backlogq) {
                queue = &l->backlogq;
                goto tnl;
        }

        tipc_link_xmit(tnl, &tnlq, xmitq);

        if (mtyp == FAILOVER_MSG) {
                tnl->drop_point = l->rcv_nxt;
                tnl->failover_reasm_skb = l->reasm_buf;
                l->reasm_buf = NULL;

                /* Failover the link's deferdq */
                if (unlikely(!skb_queue_empty(fdefq))) {
                        pr_warn("Link failover deferdq not empty: %d!\n",
                                skb_queue_len(fdefq));
                        __skb_queue_purge(fdefq);
                }
                skb_queue_splice_init(&l->deferdq, fdefq);
        }
}
/**
 * tipc_link_failover_prepare() - prepare tnl for link failover
 *
 * This is a special version of the precursor - tipc_link_tnl_prepare(),
 * see the tipc_node_link_failover() for details
 *
 * @l: failover link
 * @tnl: tunnel link
 * @xmitq: queue for messages to be xmited
 */
void tipc_link_failover_prepare(struct tipc_link *l, struct tipc_link *tnl,
                                struct sk_buff_head *xmitq)
{
        struct sk_buff_head *fdefq = &tnl->failover_deferdq;

        tipc_link_create_dummy_tnl_msg(tnl, xmitq);

        /* This failover link endpoint was never established before,
         * so it has not received anything from peer.
         * Otherwise, it must be a normal failover situation or the
         * node has entered SELF_DOWN_PEER_LEAVING and both peer nodes
         * would have to start over from scratch instead.
         */
        tnl->drop_point = 1;
        tnl->failover_reasm_skb = NULL;

        /* Initiate the link's failover deferdq */
        if (unlikely(!skb_queue_empty(fdefq))) {
                pr_warn("Link failover deferdq not empty: %d!\n",
                        skb_queue_len(fdefq));
                __skb_queue_purge(fdefq);
        }
}
/* tipc_link_validate_msg(): validate message against current link state
 * Returns true if message should be accepted, otherwise false
 */
bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
{
        u16 curr_session = l->peer_session;
        u16 session = msg_session(hdr);
        int mtyp = msg_type(hdr);

        if (msg_user(hdr) != LINK_PROTOCOL)
                return true;

        switch (mtyp) {
        case RESET_MSG:
                if (!l->in_session)
                        return true;
                /* Accept only RESET with new session number */
                return more(session, curr_session);
        case ACTIVATE_MSG:
                if (!l->in_session)
                        return true;
                /* Accept only ACTIVATE with new or current session number */
                return !less(session, curr_session);
        case STATE_MSG:
                /* Accept only STATE with current session number */
                if (!l->in_session)
                        return false;
                if (session != curr_session)
                        return false;
                /* Extra sanity check */
                if (!link_is_up(l) && msg_ack(hdr))
                        return false;
                if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
                        return true;
                /* Accept only STATE with new sequence number */
                return !less(msg_seqno(hdr), l->rcv_nxt_state);
        default:
                return false;
        }
}
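
/* Illustrative summary of the session rules implemented above:
 *
 *      RESET_MSG:    accepted when not yet in session, or when the session
 *                    number is strictly newer than the current one
 *      ACTIVATE_MSG: accepted when not yet in session, or when the session
 *                    number is current or newer
 *      STATE_MSG:    accepted only for the current session, and (when the
 *                    peer supports TIPC_LINK_PROTO_SEQNO) only with a
 *                    sequence number >= rcv_nxt_state
 */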
/* tipc_link_proto_rcv(): receive link level protocol message :
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest numerical id determines
 * network plane
 */
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
                               struct sk_buff_head *xmitq)
{
        struct tipc_msg *hdr = buf_msg(skb);
        struct tipc_gap_ack_blks *ga = NULL;
        u16 rcvgap = 0;
        u16 ack = msg_ack(hdr);
        u16 gap = msg_seq_gap(hdr);
        u16 peers_snd_nxt = msg_next_sent(hdr);
        u16 peers_tol = msg_link_tolerance(hdr);
        u16 peers_prio = msg_linkprio(hdr);
        u16 rcv_nxt = l->rcv_nxt;
        u16 dlen = msg_data_sz(hdr);
        int mtyp = msg_type(hdr);
        bool reply = msg_probe(hdr);
        u16 glen = 0;
        void *data;
        char *if_name;
        int rc = 0;

        trace_tipc_proto_rcv(skb, false, l->name);
        if (tipc_link_is_blocked(l) || !xmitq)
                goto exit;

        if (tipc_own_addr(l->net) > msg_prevnode(hdr))
                l->net_plane = msg_net_plane(hdr);

        skb_linearize(skb);
        hdr = buf_msg(skb);
        data = msg_data(hdr);

        if (!tipc_link_validate_msg(l, hdr)) {
                trace_tipc_skb_dump(skb, false, "PROTO invalid (1)!");
                trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (1)!");
                goto exit;
        }

        switch (mtyp) {
        case RESET_MSG:
        case ACTIVATE_MSG:
                /* Complete own link name with peer's interface name */
                if_name = strrchr(l->name, ':') + 1;
                if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
                        break;
                if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
                        break;
                strncpy(if_name, data, TIPC_MAX_IF_NAME);

                /* Update own tolerance if peer indicates a non-zero value */
                if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
                        l->tolerance = peers_tol;
                        l->bc_rcvlink->tolerance = peers_tol;
                }
                /* Update own priority if peer's priority is higher */
                if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
                        l->priority = peers_prio;

                /* If peer is going down we want full re-establish cycle */
                if (msg_peer_stopping(hdr)) {
                        rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
                        break;
                }

                /* If this endpoint was re-created while peer was ESTABLISHING
                 * it doesn't know current session number. Force re-synch.
                 */
                if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) &&
                    l->session != msg_dest_session(hdr)) {
                        if (less(l->session, msg_dest_session(hdr)))
                                l->session = msg_dest_session(hdr) + 1;
                        break;
                }

                /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
                if (mtyp == RESET_MSG || !link_is_up(l))
                        rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);

                /* ACTIVATE_MSG takes up link if it was already locally reset */
                if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
                        rc = TIPC_LINK_UP_EVT;

                l->peer_session = msg_session(hdr);
                l->in_session = true;
                l->peer_bearer_id = msg_bearer_id(hdr);
                if (l->mtu > msg_max_pkt(hdr))
                        l->mtu = msg_max_pkt(hdr);
                break;

        case STATE_MSG:
                l->rcv_nxt_state = msg_seqno(hdr) + 1;

                /* Update own tolerance if peer indicates a non-zero value */
                if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
                        l->tolerance = peers_tol;
                        l->bc_rcvlink->tolerance = peers_tol;
                }
                /* Update own prio if peer indicates a different value */
                if ((peers_prio != l->priority) &&
                    in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
                        l->priority = peers_prio;
                        rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
                }

                l->silent_intv_cnt = 0;
                l->stats.recv_states++;
                if (msg_probe(hdr))
                        l->stats.recv_probes++;

                if (!link_is_up(l)) {
                        if (l->state == LINK_ESTABLISHING)
                                rc = TIPC_LINK_UP_EVT;
                        break;
                }

                /* Receive Gap ACK blocks from peer if any */
                if (l->peer_caps & TIPC_GAP_ACK_BLOCK) {
                        ga = (struct tipc_gap_ack_blks *)data;
                        glen = ntohs(ga->len);
                        /* sanity check: if failed, ignore Gap ACK blocks */
                        if (glen != tipc_gap_ack_blks_sz(ga->gack_cnt))
                                ga = NULL;
                }

                tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr,
                             &l->mon_state, l->bearer_id);

                /* Send NACK if peer has sent pkts we haven't received yet */
                if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
                        rcvgap = peers_snd_nxt - l->rcv_nxt;
                if (rcvgap || reply)
                        tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
                                                  rcvgap, 0, 0, xmitq);

                rc |= tipc_link_advance_transmq(l, ack, gap, ga, xmitq);

                /* If NACK, retransmit will now start at right position */
                if (gap)
                        l->stats.recv_nacks++;

                tipc_link_advance_backlog(l, xmitq);
                if (unlikely(!skb_queue_empty(&l->wakeupq)))
                        link_prepare_wakeup(l);
        }
exit:
        kfree_skb(skb);
        return rc;
}
/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
 */
static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
                                         u16 peers_snd_nxt,
                                         struct sk_buff_head *xmitq)
{
        struct sk_buff *skb;
        struct tipc_msg *hdr;
        struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
        u16 ack = l->rcv_nxt - 1;
        u16 gap_to = peers_snd_nxt - 1;

        skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
                              0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
        if (!skb)
                return false;
        hdr = buf_msg(skb);
        msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
        msg_set_bcast_ack(hdr, ack);
        msg_set_bcgap_after(hdr, ack);
        if (dfrd_skb)
                gap_to = buf_seqno(dfrd_skb) - 1;
        msg_set_bcgap_to(hdr, gap_to);
        msg_set_non_seq(hdr, bcast);
        __skb_queue_tail(xmitq, skb);
        return true;
}
/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 */
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
                                        struct sk_buff_head *xmitq)
{
        struct sk_buff_head list;

        __skb_queue_head_init(&list);
        if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
                return;
        msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
        tipc_link_xmit(l, &list, xmitq);
}
/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
 */
void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
{
        int mtyp = msg_type(hdr);
        u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);

        if (link_is_up(l))
                return;

        if (msg_user(hdr) == BCAST_PROTOCOL) {
                l->rcv_nxt = peers_snd_nxt;
                l->state = LINK_ESTABLISHED;
                return;
        }

        if (l->peer_caps & TIPC_BCAST_SYNCH)
                return;

        if (msg_peer_node_is_up(hdr))
                return;

        /* Compatibility: accept older, less safe initial synch data */
        if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
                l->rcv_nxt = peers_snd_nxt;
}
/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
 */
int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
                          struct sk_buff_head *xmitq)
{
        struct tipc_link *snd_l = l->bc_sndlink;
        u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
        u16 from = msg_bcast_ack(hdr) + 1;
        u16 to = from + msg_bc_gap(hdr) - 1;
        int rc = 0;

        if (!link_is_up(l))
                return rc;

        if (!msg_peer_node_is_up(hdr))
                return rc;

        /* Open when peer acknowledges our bcast init msg (pkt #1) */
        if (msg_ack(hdr))
                l->bc_peer_is_up = true;

        if (!l->bc_peer_is_up)
                return rc;

        l->stats.recv_nacks++;

        /* Ignore if peers_snd_nxt goes beyond receive window */
        if (more(peers_snd_nxt, l->rcv_nxt + l->window))
                return rc;

        rc = tipc_link_bc_retrans(snd_l, l, from, to, xmitq);

        l->snd_nxt = peers_snd_nxt;
        if (link_bc_rcv_gap(l))
                rc |= TIPC_LINK_SND_STATE;

        /* Return now if sender supports nack via STATE messages */
        if (l->peer_caps & TIPC_BCAST_STATE_NACK)
                return rc;

        /* Otherwise, be backwards compatible */

        if (!more(peers_snd_nxt, l->rcv_nxt)) {
                l->nack_state = BC_NACK_SND_CONDITIONAL;
                return 0;
        }

        /* Don't NACK if one was recently sent or peeked */
        if (l->nack_state == BC_NACK_SND_SUPPRESS) {
                l->nack_state = BC_NACK_SND_UNCONDITIONAL;
                return 0;
        }

        /* Conditionally delay NACK sending until next synch rcv */
        if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
                l->nack_state = BC_NACK_SND_UNCONDITIONAL;
                if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
                        return 0;
        }

        /* Send NACK now but suppress next one */
        tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
        l->nack_state = BC_NACK_SND_SUPPRESS;
        return 0;
}
void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
                          struct sk_buff_head *xmitq)
{
        struct sk_buff *skb, *tmp;
        struct tipc_link *snd_l = l->bc_sndlink;

        if (!link_is_up(l) || !l->bc_peer_is_up)
                return;

        if (!more(acked, l->acked))
                return;

        trace_tipc_link_bc_ack(l, l->acked, acked, &snd_l->transmq);
        /* Skip over packets peer has already acked */
        skb_queue_walk(&snd_l->transmq, skb) {
                if (more(buf_seqno(skb), l->acked))
                        break;
        }

        /* Update/release the packets peer is acking now */
        skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
                if (more(buf_seqno(skb), acked))
                        break;
                if (!--TIPC_SKB_CB(skb)->ackers) {
                        __skb_unlink(skb, &snd_l->transmq);
                        kfree_skb(skb);
                }
        }
        l->acked = acked;
        tipc_link_advance_backlog(snd_l, xmitq);
        if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
                link_prepare_wakeup(snd_l);
}
/* tipc_link_bc_nack_rcv(): receive broadcast nack message
 * This function is here for backwards compatibility, since
 * no BCAST_PROTOCOL/STATE messages occur from TIPC v2.5.
 */
int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
                          struct sk_buff_head *xmitq)
{
        struct tipc_msg *hdr = buf_msg(skb);
        u32 dnode = msg_destnode(hdr);
        int mtyp = msg_type(hdr);
        u16 acked = msg_bcast_ack(hdr);
        u16 from = acked + 1;
        u16 to = msg_bcgap_to(hdr);
        u16 peers_snd_nxt = to + 1;
        int rc = 0;

        kfree_skb(skb);

        if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
                return 0;

        if (mtyp != STATE_MSG)
                return 0;

        if (dnode == tipc_own_addr(l->net)) {
                tipc_link_bc_ack_rcv(l, acked, xmitq);
                rc = tipc_link_bc_retrans(l->bc_sndlink, l, from, to, xmitq);
                l->stats.recv_nacks++;
                return rc;
        }

        /* Msg for other node => suppress own NACK at next sync if applicable */
        if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
                l->nack_state = BC_NACK_SND_SUPPRESS;

        return 0;
}
void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
{
	int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE);

	l->window = win;
	l->backlog[TIPC_LOW_IMPORTANCE].limit      = max_t(u16, 50, win);
	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = max_t(u16, 100, win * 2);
	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = max_t(u16, 150, win * 3);
	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4);
	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
}
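/* Editor's illustration (not part of the original file): a worked
 * example of the backlog limit scaling above, runnable in userspace;
 * the window value is arbitrary:
 */
#if 0
#include <stdio.h>

static unsigned int lim(unsigned int floor, unsigned int win, unsigned int k)
{
	return win * k > floor ? win * k : floor;	/* max_t(u16, ...) */
}

int main(void)
{
	unsigned int win = 50;

	/* Prints "low 50 medium 100 high 150 critical 200" */
	printf("low %u medium %u high %u critical %u\n",
	       lim(50, win, 1), lim(100, win, 2),
	       lim(150, win, 3), lim(200, win, 4));
	return 0;
}
#endif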
/**
 * tipc_link_reset_stats - reset link statistics
 * @l: pointer to link
 */
void tipc_link_reset_stats(struct tipc_link *l)
{
	memset(&l->stats, 0, sizeof(l->stats));
}
static void link_print(struct tipc_link *l, const char *str)
{
	struct sk_buff *hskb = skb_peek(&l->transmq);
	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
	u16 tail = l->snd_nxt - 1;

	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
		skb_queue_len(&l->transmq), head, tail,
		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
}
/* Parse and validate nested (link) properties valid for media, bearer and link
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested_deprecated(props, TIPC_NLA_PROP_MAX, prop,
					  tipc_nl_prop_policy, NULL);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 win;

		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
			return -EINVAL;
	}

	return 0;
}
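/* Editor's illustration (not part of the original file): a hedged
 * usage sketch of the parser above, mirroring how a netlink set
 * handler typically consumes it; "attrs" and "tol" are assumed
 * context, error handling trimmed:
 */
#if 0
	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
	int err;

	err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
	if (err)
		return err;
	if (props[TIPC_NLA_PROP_TOL])
		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
#endif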
static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, 0},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, 0},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}
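/* Editor's note: the function above uses the standard all-or-nothing
 * netlink nesting pattern: start a nest, emit every attribute, and
 * cancel the whole nest if the skb runs out of tailroom, so a
 * truncated stats block is never sent.  Skeleton of the pattern,
 * with invented attribute names:
 */
#if 0
	struct nlattr *nest = nla_nest_start_noflag(skb, MY_NESTED_ATTR);

	if (!nest)
		return -EMSGSIZE;
	if (nla_put_u32(skb, MY_U32_ATTR, val))
		goto msg_full;
	nla_nest_end(skb, nest);
	return 0;
msg_full:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
#endif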
/* Caller should hold appropriate locks to protect the link */
int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
		       struct tipc_link *link, int nlflags)
{
	u32 self = tipc_own_addr(net);
	struct nlattr *attrs;
	struct nlattr *prop;
	void *hdr;
	int err;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  nlflags, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, tipc_cluster_mask(self)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (link->active)
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
			link->window))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	u32 bc_mode = tipc_bcast_get_broadcast_mode(net);
	u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return 0;

	tipc_bcast_lock(net);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr) {
		tipc_bcast_unlock(net);
		return -EMSGSIZE;
	}

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
		goto attr_msg_full;

	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST, bc_mode))
		goto prop_msg_full;
	if (bc_mode & BCLINK_MODE_SEL)
		if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST_RATIO,
				bc_ratio))
			goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

	tipc_bcast_unlock(net);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	tipc_bcast_unlock(net);
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
			     struct sk_buff_head *xmitq)
{
	l->tolerance = tol;
	if (l->bc_rcvlink)
		l->bc_rcvlink->tolerance = tol;
	if (link_is_up(l))
		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
}
void tipc_link_set_prio(struct tipc_link *l, u32 prio,
			struct sk_buff_head *xmitq)
{
	l->priority = prio;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq);
}
void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
{
	l->abort_limit = limit;
}
char *tipc_link_name_ext(struct tipc_link *l, char *buf)
{
	if (!l)
		scnprintf(buf, TIPC_MAX_LINK_NAME, "null");
	else if (link_is_bc_sndlink(l))
		scnprintf(buf, TIPC_MAX_LINK_NAME, "broadcast-sender");
	else if (link_is_bc_rcvlink(l))
		scnprintf(buf, TIPC_MAX_LINK_NAME,
			  "broadcast-receiver, peer %x", l->addr);
	else
		memcpy(buf, l->name, TIPC_MAX_LINK_NAME);

	return buf;
}
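/* Editor's illustration (not part of the original file): a hedged
 * usage sketch for the helper above, e.g. in a debug print; the
 * caller supplies a buffer of at least TIPC_MAX_LINK_NAME bytes:
 */
#if 0
	char buf[TIPC_MAX_LINK_NAME];

	pr_debug("resetting link %s\n", tipc_link_name_ext(l, buf));
#endif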
/**
 * tipc_link_dump - dump TIPC link data
 * @l: tipc link to be dumped
 * @dqueues: bitmask deciding which link queues are to be dumped:
 *           - TIPC_DUMP_NONE: don't dump any link queue
 *           - TIPC_DUMP_TRANSMQ: dump link transmq queue
 *           - TIPC_DUMP_BACKLOGQ: dump link backlog queue
 *           - TIPC_DUMP_DEFERDQ: dump link deferred queue (deferdq)
 *           - TIPC_DUMP_INPUTQ: dump link input queue
 *           - TIPC_DUMP_WAKEUP: dump link wakeup queue
 *           - TIPC_DUMP_ALL: dump all the link queues above
 * @buf: buffer where the formatted dump data is returned
 */
int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf)
{
	int i = 0;
	size_t sz = (dqueues) ? LINK_LMAX : LINK_LMIN;
	struct sk_buff_head *list;
	struct sk_buff *hskb, *tskb;
	u32 len;

	if (!l) {
		i += scnprintf(buf, sz, "link data: (null)\n");
		return i;
	}

	i += scnprintf(buf, sz, "link data: %x", l->addr);
	i += scnprintf(buf + i, sz - i, " %x", l->state);
	i += scnprintf(buf + i, sz - i, " %u", l->in_session);
	i += scnprintf(buf + i, sz - i, " %u", l->session);
	i += scnprintf(buf + i, sz - i, " %u", l->peer_session);
	i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt);
	i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt);
	i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt_state);
	i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt_state);
	i += scnprintf(buf + i, sz - i, " %x", l->peer_caps);
	i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt);
	i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt);
	i += scnprintf(buf + i, sz - i, " %u", l->prev_from);
	i += scnprintf(buf + i, sz - i, " %u", l->stale_cnt);
	i += scnprintf(buf + i, sz - i, " %u", l->acked);

	list = &l->transmq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	list = &l->deferdq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	list = &l->backlogq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	list = l->inputq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u\n", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	if (dqueues & TIPC_DUMP_TRANSMQ) {
		i += scnprintf(buf + i, sz - i, "transmq: ");
		i += tipc_list_dump(&l->transmq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_BACKLOGQ) {
		i += scnprintf(buf + i, sz - i,
			       "backlogq: <%u %u %u %u %u>, ",
			       l->backlog[TIPC_LOW_IMPORTANCE].len,
			       l->backlog[TIPC_MEDIUM_IMPORTANCE].len,
			       l->backlog[TIPC_HIGH_IMPORTANCE].len,
			       l->backlog[TIPC_CRITICAL_IMPORTANCE].len,
			       l->backlog[TIPC_SYSTEM_IMPORTANCE].len);
		i += tipc_list_dump(&l->backlogq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_DEFERDQ) {
		i += scnprintf(buf + i, sz - i, "deferdq: ");
		i += tipc_list_dump(&l->deferdq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_INPUTQ) {
		i += scnprintf(buf + i, sz - i, "inputq: ");
		i += tipc_list_dump(l->inputq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_WAKEUP) {
		i += scnprintf(buf + i, sz - i, "wakeup: ");
		i += tipc_list_dump(&l->wakeupq, false, buf + i);