/*
 * net/tipc/msg.c: TIPC message header routines
 *
 * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <net/sock.h>
#include "core.h"
#include "msg.h"
#include "addr.h"
#include "name_table.h"
#include "crypto.h"

#define BUF_ALIGN(x) ALIGN(x, 4)
#define MAX_FORWARD_SIZE 1024

#ifdef CONFIG_TIPC_CRYPTO
#define BUF_HEADROOM ALIGN(((LL_MAX_HEADER + 48) + EHDR_MAX_SIZE), 16)
#define BUF_OVERHEAD (BUF_HEADROOM + TIPC_AES_GCM_TAG_SIZE)
#else
#define BUF_HEADROOM (LL_MAX_HEADER + 48)
#define BUF_OVERHEAD BUF_HEADROOM
#endif

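/* The single-page fallback MTU: what remains of PAGE_SIZE once the aligned
 * buffer overhead and the skb_shared_info trailer are subtracted.
 * tipc_msg_build() falls back to this MTU when a larger allocation fails.
 */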
const int one_page_mtu = PAGE_SIZE - SKB_DATA_ALIGN(BUF_OVERHEAD) -
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

/**
 * tipc_buf_acquire - creates a TIPC message buffer
 * @size: message size (including TIPC header)
 * @gfp: memory allocation flags
 *
 * Return: a new buffer with data pointers set to the specified size.
 *
 * NOTE:
 * Headroom is reserved to allow prepending of a data link header.
 * There may also be unrequested tailroom present at the buffer's end.
 */
struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = alloc_skb_fclone(BUF_OVERHEAD + size, gfp);
	if (skb) {
		skb_reserve(skb, BUF_HEADROOM);
		skb_put(skb, size);
	}
	return skb;
}

void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
		   u32 hsize, u32 dnode)
{
	memset(m, 0, hsize);
	msg_set_version(m);
	msg_set_user(m, user);
	msg_set_hdr_sz(m, hsize);
	msg_set_size(m, hsize);
	msg_set_prevnode(m, own_node);
	msg_set_type(m, type);
	if (hsize > SHORT_H_SIZE) {
		msg_set_orignode(m, own_node);
		msg_set_destnode(m, dnode);
	}
}

struct sk_buff *tipc_msg_create(uint user, uint type,
				uint hdr_sz, uint data_sz, u32 dnode,
				u32 onode, u32 dport, u32 oport, int errcode)
{
	struct tipc_msg *msg;
	struct sk_buff *buf;

	buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
	if (unlikely(!buf))
		return NULL;

	msg = buf_msg(buf);
	tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
	msg_set_size(msg, hdr_sz + data_sz);
	msg_set_origport(msg, oport);
	msg_set_destport(msg, dport);
	msg_set_errcode(msg, errcode);
	return buf;
}

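/* Usage sketch (illustrative, not part of the original file): build an
 * empty connection-manager acknowledgment. The message constants come
 * from msg.h and uapi tipc.h; the wrapper itself is hypothetical.
 */
static __maybe_unused struct sk_buff *example_conn_ack(u32 dnode, u32 onode,
						       u32 dport, u32 oport)
{
	return tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
			       dnode, onode, dport, oport, TIPC_OK);
}
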
/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
 * @*headbuf: in: NULL for first frag, otherwise value returned from prev call
 *            out: set when successful non-complete reassembly, otherwise NULL
 * @*buf: in: the buffer to append; always defined
 *        out: head buf after successful complete reassembly, otherwise NULL
 * Returns 1 when reassembly is complete, otherwise 0
 */
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
	struct sk_buff *head = *headbuf;
	struct sk_buff *frag = *buf;
	struct sk_buff *tail = NULL;
	struct tipc_msg *msg;
	u32 fragid;
	int delta;
	bool headstolen;

	if (!frag)
		goto err;

	msg = buf_msg(frag);
	fragid = msg_type(msg);
	frag->next = NULL;
	skb_pull(frag, msg_hdr_sz(msg));

	if (fragid == FIRST_FRAGMENT) {
		if (unlikely(head))
			goto err;
		*buf = NULL;
		if (skb_has_frag_list(frag) && __skb_linearize(frag))
			goto err;
		frag = skb_unshare(frag, GFP_ATOMIC);
		if (unlikely(!frag))
			goto err;
		head = *headbuf = frag;
		TIPC_SKB_CB(head)->tail = NULL;
		return 0;
	}

	if (!head)
		goto err;

	/* Either the input skb ownership is transferred to headskb
	 * or the input skb is freed, clear the reference to avoid
	 * bad access on error path.
	 */
	*buf = NULL;
	if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
		kfree_skb_partial(frag, headstolen);
	} else {
		tail = TIPC_SKB_CB(head)->tail;
		if (!skb_has_frag_list(head))
			skb_shinfo(head)->frag_list = frag;
		else
			tail->next = frag;
		head->truesize += frag->truesize;
		head->data_len += frag->len;
		head->len += frag->len;
		TIPC_SKB_CB(head)->tail = frag;
	}

	if (fragid == LAST_FRAGMENT) {
		TIPC_SKB_CB(head)->validated = 0;
		if (unlikely(!tipc_msg_validate(&head)))
			goto err;
		*buf = head;
		TIPC_SKB_CB(head)->tail = NULL;
		*headbuf = NULL;
		return 1;
	}
	return 0;
err:
	kfree_skb(*buf);
	kfree_skb(*headbuf);
	*buf = *headbuf = NULL;
	return 0;
}

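/* For a typical reassembly loop driving tipc_buf_append(), see
 * tipc_msg_assemble() further down in this file.
 */
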
/**
 * tipc_msg_append(): Append data to tail of an existing buffer queue
 * @_hdr: header to be used
 * @m: the data to be appended
 * @dlen: size of data to be appended
 * @mss: max allowable size of buffer
 * @txq: queue to append to
 *
 * Return: the number of 1k blocks appended or an errno value
 */
int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen,
		    int mss, struct sk_buff_head *txq)
{
	struct sk_buff *skb;
	int accounted, total, curr;
	int mlen, cpy, rem = dlen;
	struct tipc_msg *hdr;

	skb = skb_peek_tail(txq);
	accounted = skb ? msg_blocks(buf_msg(skb)) : 0;
	total = accounted;

	do {
		if (!skb || skb->len >= mss) {
			skb = tipc_buf_acquire(mss, GFP_KERNEL);
			if (unlikely(!skb))
				return -ENOMEM;
			skb_orphan(skb);
			skb_trim(skb, MIN_H_SIZE);
			hdr = buf_msg(skb);
			skb_copy_to_linear_data(skb, _hdr, MIN_H_SIZE);
			msg_set_hdr_sz(hdr, MIN_H_SIZE);
			msg_set_size(hdr, MIN_H_SIZE);
			__skb_queue_tail(txq, skb);
			total += 1;
		}
		hdr = buf_msg(skb);
		curr = msg_blocks(hdr);
		mlen = msg_size(hdr);
		cpy = min_t(size_t, rem, mss - mlen);
		if (cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter))
			return -EFAULT;
		msg_set_size(hdr, mlen + cpy);
		skb_put(skb, cpy);
		rem -= cpy;
		total += msg_blocks(hdr) - curr;
	} while (rem > 0);
	return total - accounted;
}

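/* Note: the return value is expressed in the same 1k-block units as
 * msg_blocks(), i.e. the growth of the queue in blocks, not in bytes.
 */
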
/* tipc_msg_validate - validate basic format of received message
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should. The routine also ensures
 * that the entire message header is stored in the main fragment of the message
 * buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 */
bool tipc_msg_validate(struct sk_buff **_skb)
{
	struct sk_buff *skb = *_skb;
	struct tipc_msg *hdr;
	int msz, hsz;

	/* Ensure that flow control ratio condition is satisfied */
	if (unlikely(skb->truesize / buf_roundup_len(skb) >= 4)) {
		skb = skb_copy_expand(skb, BUF_HEADROOM, 0, GFP_ATOMIC);
		if (!skb)
			return false;
		kfree_skb(*_skb);
		*_skb = skb;
	}

	if (unlikely(TIPC_SKB_CB(skb)->validated))
		return true;

	if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
		return false;

	hsz = msg_hdr_sz(buf_msg(skb));
	if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE))
		return false;
	if (unlikely(!pskb_may_pull(skb, hsz)))
		return false;

	hdr = buf_msg(skb);
	if (unlikely(msg_version(hdr) != TIPC_VERSION))
		return false;

	msz = msg_size(hdr);
	if (unlikely(msz < hsz))
		return false;
	if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
		return false;
	if (unlikely(skb->len < msz))
		return false;

	TIPC_SKB_CB(skb)->validated = 1;
	return true;
}

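/* Note: the validated flag in TIPC_SKB_CB() turns repeated validation into
 * a no-op; tipc_buf_append() clears it before revalidating a reassembled
 * message.
 */
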
/**
 * tipc_msg_fragment - build a fragment skb list for TIPC message
 * @skb: TIPC message skb
 * @hdr: internal msg header to be put on the top of the fragments
 * @pktmax: max size of a fragment incl. the header
 * @frags: returned fragment skb list
 *
 * Return: 0 if the fragmentation is successful, otherwise: -EINVAL
 * or -ENOMEM
 */
int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr,
		      int pktmax, struct sk_buff_head *frags)
{
	int pktno, nof_fragms, dsz, dmax, eat;
	struct tipc_msg *_hdr;
	struct sk_buff *_skb;
	u8 *data;

	/* Non-linear buffer? */
	if (skb_linearize(skb))
		return -ENOMEM;

	data = (u8 *)skb->data;
	dsz = msg_size(buf_msg(skb));
	dmax = pktmax - INT_H_SIZE;
	if (dsz <= dmax || !dmax)
		return -EINVAL;

	nof_fragms = dsz / dmax + 1;
	for (pktno = 1; pktno <= nof_fragms; pktno++) {
		if (pktno < nof_fragms)
			eat = dmax;
		else
			eat = dsz % dmax;
		/* Allocate a new fragment */
		_skb = tipc_buf_acquire(INT_H_SIZE + eat, GFP_ATOMIC);
		if (!_skb)
			goto error;
		skb_orphan(_skb);
		__skb_queue_tail(frags, _skb);
		/* Copy header & data to the fragment */
		skb_copy_to_linear_data(_skb, hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(_skb, INT_H_SIZE, data, eat);
		data += eat;
		/* Update the fragment's header */
		_hdr = buf_msg(_skb);
		msg_set_fragm_no(_hdr, pktno);
		msg_set_nof_fragms(_hdr, nof_fragms);
		msg_set_size(_hdr, INT_H_SIZE + eat);
	}
	return 0;

error:
	__skb_queue_purge(frags);
	__skb_queue_head_init(frags);
	return -ENOMEM;
}

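/* Usage sketch (illustrative, not part of the original file): split a big
 * message into MTU-sized pieces. "fraghdr" is assumed to be an INT_H_SIZE
 * internal header prepared by the caller, e.g. with tipc_msg_init().
 */
static __maybe_unused int example_fragment(struct sk_buff *skb,
					   const struct tipc_msg *fraghdr,
					   int mtu, struct sk_buff_head *out)
{
	__skb_queue_head_init(out);
	return tipc_msg_fragment(skb, fraghdr, mtu, out);
}
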
/**
 * tipc_msg_build - create buffer chain containing specified header and data
 * @mhdr: Message header, to be prepended to data
 * @m: User message
 * @offset: buffer offset for fragmented messages (FIXME)
 * @dsz: Total length of user data
 * @pktmax: Max packet size that can be used
 * @list: Buffer or chain of buffers to be returned to caller
 *
 * Note that the recursive call we are making here is safe, since it can
 * logically go only one further level down.
 *
 * Return: message data size or errno: -ENOMEM, -EFAULT
 */
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
		   int dsz, int pktmax, struct sk_buff_head *list)
{
	int mhsz = msg_hdr_sz(mhdr);
	struct tipc_msg pkthdr;
	int msz = mhsz + dsz;
	int pktrem = pktmax;
	struct sk_buff *skb;
	int drem = dsz;
	int pktno = 1;
	char *pktpos;
	int pktsz;
	int rc;

	msg_set_size(mhdr, msz);

	/* No fragmentation needed? */
	if (likely(msz <= pktmax)) {
		skb = tipc_buf_acquire(msz, GFP_KERNEL);

		/* Fall back to smaller MTU if node local message */
		if (unlikely(!skb)) {
			if (pktmax != MAX_MSG_SIZE)
				return -ENOMEM;
			rc = tipc_msg_build(mhdr, m, offset, dsz,
					    one_page_mtu, list);
			if (rc != dsz)
				return rc;
			if (tipc_msg_assemble(list))
				return dsz;
			return -ENOMEM;
		}
		skb_orphan(skb);
		__skb_queue_tail(list, skb);
		skb_copy_to_linear_data(skb, mhdr, mhsz);
		pktpos = skb->data + mhsz;
		if (copy_from_iter_full(pktpos, dsz, &m->msg_iter))
			return dsz;
		rc = -EFAULT;
		goto error;
	}

	/* Prepare reusable fragment header */
	tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER,
		      FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
	msg_set_size(&pkthdr, pktmax);
	msg_set_fragm_no(&pkthdr, pktno);
	msg_set_importance(&pkthdr, msg_importance(mhdr));

	/* Prepare first fragment */
	skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_orphan(skb);
	__skb_queue_tail(list, skb);
	pktpos = skb->data;
	skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
	pktpos += INT_H_SIZE;
	pktrem -= INT_H_SIZE;
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
	pktpos += mhsz;
	pktrem -= mhsz;

	do {
		if (drem < pktrem)
			pktrem = drem;

		if (!copy_from_iter_full(pktpos, pktrem, &m->msg_iter)) {
			rc = -EFAULT;
			goto error;
		}
		drem -= pktrem;
		if (!drem)
			break;

		/* Prepare new fragment: */
		if (drem < (pktmax - INT_H_SIZE))
			pktsz = drem + INT_H_SIZE;
		else
			pktsz = pktmax;
		skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
		if (!skb) {
			rc = -ENOMEM;
			goto error;
		}
		skb_orphan(skb);
		__skb_queue_tail(list, skb);
		msg_set_type(&pkthdr, FRAGMENT);
		msg_set_size(&pkthdr, pktsz);
		msg_set_fragm_no(&pkthdr, ++pktno);
		skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
		pktpos = skb->data + INT_H_SIZE;
		pktrem = pktsz - INT_H_SIZE;
	} while (1);
	msg_set_type(buf_msg(skb), LAST_FRAGMENT);
	return dsz;
error:
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	return rc;
}

/**
 * tipc_msg_bundle - Append contents of a buffer to tail of an existing one
 * @bskb: the bundle buffer to append to
 * @msg: message to be appended
 * @max: max allowable size for the bundle buffer
 *
 * Return: "true" if bundling has been performed, otherwise "false"
 */
static bool tipc_msg_bundle(struct sk_buff *bskb, struct tipc_msg *msg,
			    u32 max)
{
	struct tipc_msg *bmsg = buf_msg(bskb);
	u32 msz, bsz, offset, pad;

	msz = msg_size(msg);
	bsz = msg_size(bmsg);
	offset = BUF_ALIGN(bsz);
	pad = offset - bsz;

	if (unlikely(skb_tailroom(bskb) < (pad + msz)))
		return false;
	if (unlikely(max < (offset + msz)))
		return false;

	skb_put(bskb, pad + msz);
	skb_copy_to_linear_data_offset(bskb, offset, msg, msz);
	msg_set_size(bmsg, offset + msz);
	msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
	return true;
}

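/* Note: messages inside a bundle sit on 4-byte (BUF_ALIGN) boundaries;
 * tipc_msg_extract() below relies on this when it advances its position.
 */
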
/**
 * tipc_msg_try_bundle - Try to bundle a new message to the last one
 * @tskb: the last/target message to which the new one will be appended
 * @skb: the new message skb pointer
 * @mss: max message size (header inclusive)
 * @dnode: destination node for the message
 * @new_bundle: if this call made a new bundle or not
 *
 * Return: "true" if the new message skb is suitable for bundling now or
 * later; if bundling was actually performed this time, the skb is consumed
 * and the skb pointer set to NULL.
 * Otherwise "false": the skb cannot be bundled at all.
 */
bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,
			 u32 dnode, bool *new_bundle)
{
	struct tipc_msg *msg, *inner, *outer;
	u32 tsz;

	/* First, check if the new buffer is suitable for bundling */
	msg = buf_msg(*skb);
	if (msg_user(msg) == MSG_FRAGMENTER)
		return false;
	if (msg_user(msg) == TUNNEL_PROTOCOL)
		return false;
	if (msg_user(msg) == BCAST_PROTOCOL)
		return false;
	if (mss <= INT_H_SIZE + msg_size(msg))
		return false;

	/* Ok, but does a last/target buffer exist to append to? */
	if (unlikely(!tskb))
		return true;

	/* Is it a bundle already? Try to bundle the new message to it */
	if (msg_user(buf_msg(tskb)) == MSG_BUNDLER) {
		*new_bundle = false;
		goto bundle;
	}

	/* Make a new bundle of the two messages if possible */
	tsz = msg_size(buf_msg(tskb));
	if (unlikely(mss < BUF_ALIGN(INT_H_SIZE + tsz) + msg_size(msg)))
		return true;
	if (unlikely(pskb_expand_head(tskb, INT_H_SIZE, mss - tsz - INT_H_SIZE,
				      GFP_ATOMIC)))
		return true;
	inner = buf_msg(tskb);
	skb_push(tskb, INT_H_SIZE);
	outer = buf_msg(tskb);
	tipc_msg_init(msg_prevnode(inner), outer, MSG_BUNDLER, 0, INT_H_SIZE,
		      dnode);
	msg_set_importance(outer, msg_importance(inner));
	msg_set_size(outer, INT_H_SIZE + tsz);
	msg_set_msgcnt(outer, 1);
	*new_bundle = true;

bundle:
	if (likely(tipc_msg_bundle(tskb, msg, mss))) {
		consume_skb(*skb);
		*skb = NULL;
	}
	return true;
}

/**
 * tipc_msg_extract(): extract bundled inner packet from buffer
 * @skb: buffer to be extracted from.
 * @iskb: extracted inner buffer, to be returned
 * @pos: position in outer message of msg to be extracted; updated to the
 *       position of the next msg
 * Consumes outer buffer when last packet extracted
 * Return: true when there is an extracted buffer, otherwise false
 */
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
{
	struct tipc_msg *hdr, *ihdr;
	int imsz;

	*iskb = NULL;
	if (unlikely(skb_linearize(skb)))
		goto none;

	hdr = buf_msg(skb);
	if (unlikely(*pos > (msg_data_sz(hdr) - MIN_H_SIZE)))
		goto none;

	ihdr = (struct tipc_msg *)(msg_data(hdr) + *pos);
	imsz = msg_size(ihdr);

	if ((*pos + imsz) > msg_data_sz(hdr))
		goto none;

	*iskb = tipc_buf_acquire(imsz, GFP_ATOMIC);
	if (!*iskb)
		goto none;

	skb_copy_to_linear_data(*iskb, ihdr, imsz);
	if (unlikely(!tipc_msg_validate(iskb)))
		goto none;

	*pos += BUF_ALIGN(imsz);
	return true;
none:
	kfree_skb(skb);
	kfree_skb(*iskb);
	*iskb = NULL;
	return false;
}

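/* Typical caller loop (sketch, not part of the original file; the consumer
 * function is hypothetical):
 *
 *	int pos = 0;
 *	struct sk_buff *iskb;
 *
 *	while (tipc_msg_extract(skb, &iskb, &pos))
 *		deliver(iskb);
 *
 * The outer skb is consumed by the first call that finds no further
 * message, so the caller must not touch it afterwards.
 */
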
/**
 * tipc_msg_reverse(): swap source and destination addresses and add error code
 * @own_node: originating node id for reversed message
 * @skb: buffer containing message to be reversed; will be consumed
 * @err: error code to be set in message, if any
 * Replaces consumed buffer with new one when successful
 * Return: true if success, otherwise false
 */
bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
{
	struct sk_buff *_skb = *skb;
	struct tipc_msg *_hdr, *hdr;
	int hlen, dlen;

	if (skb_linearize(_skb))
		goto exit;
	_hdr = buf_msg(_skb);
	dlen = min_t(uint, msg_data_sz(_hdr), MAX_FORWARD_SIZE);
	hlen = msg_hdr_sz(_hdr);

	if (msg_dest_droppable(_hdr))
		goto exit;
	if (msg_errcode(_hdr))
		goto exit;

	/* Never return SHORT header */
	if (hlen == SHORT_H_SIZE)
		hlen = BASIC_H_SIZE;

	/* Don't return data along with SYN+, - sender has a clone */
	if (msg_is_syn(_hdr) && err == TIPC_ERR_OVERLOAD)
		dlen = 0;

	/* Allocate new buffer to return */
	*skb = tipc_buf_acquire(hlen + dlen, GFP_ATOMIC);
	if (!*skb)
		goto exit;
	memcpy((*skb)->data, _skb->data, msg_hdr_sz(_hdr));
	memcpy((*skb)->data + hlen, msg_data(_hdr), dlen);

	/* Build reverse header in new buffer */
	hdr = buf_msg(*skb);
	msg_set_hdr_sz(hdr, hlen);
	msg_set_errcode(hdr, err);
	msg_set_non_seq(hdr, 0);
	msg_set_origport(hdr, msg_destport(_hdr));
	msg_set_destport(hdr, msg_origport(_hdr));
	msg_set_destnode(hdr, msg_prevnode(_hdr));
	msg_set_prevnode(hdr, own_node);
	msg_set_orignode(hdr, own_node);
	msg_set_size(hdr, hlen + dlen);
	kfree_skb(_skb);
	return true;
exit:
	kfree_skb(_skb);
	*skb = NULL;
	return false;
}

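/* See tipc_skb_reject() at the end of this file for the typical use:
 * reverse a rejected message and queue it for transmission to the sender.
 */
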
bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy)
{
	struct sk_buff *skb, *_skb;

	skb_queue_walk(msg, skb) {
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb) {
			__skb_queue_purge(cpy);
			pr_err_ratelimited("Failed to clone buffer chain\n");
			return false;
		}
		__skb_queue_tail(cpy, _skb);
	}
	return true;
}

/**
 * tipc_msg_lookup_dest(): try to find new destination for named message
 * @net: pointer to associated network namespace
 * @skb: the buffer containing the message
 * @err: error code to be used by caller if lookup fails
 * Does not consume buffer
 * Return: true if a destination is found, false otherwise
 */
bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
{
	struct tipc_msg *msg = buf_msg(skb);
	u32 scope = msg_lookup_scope(msg);
	u32 self = tipc_own_addr(net);
	u32 inst = msg_nameinst(msg);
	struct tipc_socket_addr sk;
	struct tipc_uaddr ua;

	if (!msg_isdata(msg))
		return false;
	if (!msg_named(msg))
		return false;
	if (msg_errcode(msg))
		return false;
	*err = TIPC_ERR_NO_NAME;
	if (skb_linearize(skb))
		return false;
	msg = buf_msg(skb);
	if (msg_reroute_cnt(msg))
		return false;
	tipc_uaddr(&ua, TIPC_SERVICE_RANGE, scope,
		   msg_nametype(msg), inst, inst);
	sk.node = tipc_scope2node(net, scope);
	if (!tipc_nametbl_lookup_anycast(net, &ua, &sk))
		return false;
	msg_incr_reroute_cnt(msg);
	if (sk.node != self)
		msg_set_prevnode(msg, self);
	msg_set_destnode(msg, sk.node);
	msg_set_destport(msg, sk.ref);
	*err = TIPC_OK;

	return true;
}

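/* Note: the msg_reroute_cnt() check above allows at most one reroute per
 * message, which prevents endless name-lookup loops between nodes.
 */
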
/* tipc_msg_assemble() - assemble chain of fragments into one message
 */
bool tipc_msg_assemble(struct sk_buff_head *list)
{
	struct sk_buff *skb, *tmp = NULL;

	if (skb_queue_len(list) == 1)
		return true;

	while ((skb = __skb_dequeue(list))) {
		skb->next = NULL;
		if (tipc_buf_append(&tmp, &skb)) {
			__skb_queue_tail(list, skb);
			return true;
		}
		if (!tmp)
			break;
	}
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	pr_warn("Failed to assemble buffer\n");
	return false;
}

/* tipc_msg_reassemble() - clone a buffer chain of fragments and
 *                         reassemble the clones into one message
 */
bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq)
{
	struct sk_buff *skb, *_skb;
	struct sk_buff *frag = NULL;
	struct sk_buff *head = NULL;
	int hdr_len;

	/* Copy header if single buffer */
	if (skb_queue_len(list) == 1) {
		skb = skb_peek(list);
		hdr_len = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
		_skb = __pskb_copy(skb, hdr_len, GFP_ATOMIC);
		if (!_skb)
			return false;
		__skb_queue_tail(rcvq, _skb);
		return true;
	}

	/* Clone all fragments and reassemble */
	skb_queue_walk(list, skb) {
		frag = skb_clone(skb, GFP_ATOMIC);
		if (!frag)
			goto error;
		frag->next = NULL;
		if (tipc_buf_append(&head, &frag))
			break;
		if (!head)
			goto error;
	}
	__skb_queue_tail(rcvq, frag);
	return true;
error:
	pr_warn("Failed to clone local mcast rcv buffer\n");
	kfree_skb(head);
	return false;
}

bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
			struct sk_buff_head *cpy)
{
	struct sk_buff *skb, *_skb;

	skb_queue_walk(msg, skb) {
		_skb = pskb_copy(skb, GFP_ATOMIC);
		if (!_skb) {
			__skb_queue_purge(cpy);
			return false;
		}
		msg_set_destnode(buf_msg(_skb), dst);
		__skb_queue_tail(cpy, _skb);
	}
	return true;
}

/* __tipc_skb_queue_sorted(): sort pkt into list according to sequence number
 * @list: list to be appended to
 * @seqno: sequence number of buffer to add
 * @skb: buffer to add
 */
bool __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
			     struct sk_buff *skb)
{
	struct sk_buff *_skb, *tmp;

	if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) {
		__skb_queue_head(list, skb);
		return true;
	}

	if (more(seqno, buf_seqno(skb_peek_tail(list)))) {
		__skb_queue_tail(list, skb);
		return true;
	}

	skb_queue_walk_safe(list, _skb, tmp) {
		if (more(seqno, buf_seqno(_skb)))
			continue;
		if (seqno == buf_seqno(_skb))
			break;
		__skb_queue_before(list, _skb, skb);
		return true;
	}
	kfree_skb(skb);
	return false;
}

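/* Note: a buffer whose sequence number already exists in the list is a
 * duplicate; it is freed and the function returns false.
 */
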
void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb,
		     struct sk_buff_head *xmitq)
{
	if (tipc_msg_reverse(tipc_own_addr(net), &skb, err))
		__skb_queue_tail(xmitq, skb);
}