/*
 * net/tipc/msg.c: TIPC message header routines
 *
 * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <net/sock.h>
#include "core.h"
#include "msg.h"
#include "addr.h"
#include "name_table.h"
#include "crypto.h"

#define BUF_ALIGN(x) ALIGN(x, 4)
#define MAX_FORWARD_SIZE 1024
#ifdef CONFIG_TIPC_CRYPTO
#define BUF_HEADROOM ALIGN(((LL_MAX_HEADER + 48) + EHDR_MAX_SIZE), 16)
#define BUF_OVERHEAD (BUF_HEADROOM + TIPC_AES_GCM_TAG_SIZE)
#else
#define BUF_HEADROOM (LL_MAX_HEADER + 48)
#define BUF_OVERHEAD BUF_HEADROOM
#endif

const int one_page_mtu = PAGE_SIZE - SKB_DATA_ALIGN(BUF_OVERHEAD) -
                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

/**
 * tipc_buf_acquire - creates a TIPC message buffer
 * @size: message size (including TIPC header)
 * @gfp: memory allocation flags
 *
 * Return: a new buffer with data pointers set to the specified size.
 *
 * NOTE:
 * Headroom is reserved to allow prepending of a data link header.
 * There may also be unrequested tailroom present at the buffer's end.
 */
struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
{
        struct sk_buff *skb;

        skb = alloc_skb_fclone(BUF_OVERHEAD + size, gfp);
        if (skb) {
                skb_reserve(skb, BUF_HEADROOM);
                skb_put(skb, size);
                skb->next = NULL;
        }
        return skb;
}
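
/*
 * Usage sketch (illustrative only, not part of the original file): a typical
 * caller allocates a buffer sized for the full message and then fills in the
 * header through buf_msg(). 'own_addr' and 'peer_addr' are placeholder
 * variables.
 *
 *      struct sk_buff *skb = tipc_buf_acquire(MAX_H_SIZE, GFP_ATOMIC);
 *
 *      if (!skb)
 *              return NULL;
 *      tipc_msg_init(own_addr, buf_msg(skb), LINK_PROTOCOL, STATE_MSG,
 *                    MAX_H_SIZE, peer_addr);
 *      return skb;
 */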

void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
                   u32 hsize, u32 dnode)
{
        memset(m, 0, hsize);
        msg_set_version(m);
        msg_set_user(m, user);
        msg_set_hdr_sz(m, hsize);
        msg_set_size(m, hsize);
        msg_set_prevnode(m, own_node);
        msg_set_type(m, type);
        if (hsize > SHORT_H_SIZE) {
                msg_set_orignode(m, own_node);
                msg_set_destnode(m, dnode);
        }
}

struct sk_buff *tipc_msg_create(uint user, uint type,
                                uint hdr_sz, uint data_sz, u32 dnode,
                                u32 onode, u32 dport, u32 oport, int errcode)
{
        struct tipc_msg *msg;
        struct sk_buff *buf;

        buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
        if (unlikely(!buf))
                return NULL;

        msg = buf_msg(buf);
        tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
        msg_set_size(msg, hdr_sz + data_sz);
        msg_set_origport(msg, oport);
        msg_set_destport(msg, dport);
        msg_set_errcode(msg, errcode);
        return buf;
}
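
/*
 * Illustrative sketch (hypothetical caller): tipc_msg_create() rolls
 * allocation and header setup into one step, e.g. for a self-contained
 * connection-manager probe; dnode/onode/dport/oport are placeholders.
 *
 *      struct sk_buff *skb;
 *
 *      skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
 *                            dnode, onode, dport, oport, TIPC_OK);
 *      if (!skb)
 *              return;
 */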

/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
 * @*headbuf: in: NULL for first frag, otherwise value returned from prev call
 *            out: set when successful non-complete reassembly, otherwise NULL
 * @*buf: in: the buffer to append. Always defined
 *        out: head buf after successful complete reassembly, otherwise NULL
 * Returns 1 when reassembly complete, otherwise 0
 */
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
        struct sk_buff *head = *headbuf;
        struct sk_buff *frag = *buf;
        struct sk_buff *tail = NULL;
        struct tipc_msg *msg;
        u32 fragid;
        int delta;
        bool headstolen;

        if (!frag)
                goto err;

        msg = buf_msg(frag);
        fragid = msg_type(msg);
        frag->next = NULL;
        skb_pull(frag, msg_hdr_sz(msg));

        if (fragid == FIRST_FRAGMENT) {
                if (unlikely(head))
                        goto err;
                *buf = NULL;
                if (skb_has_frag_list(frag) && __skb_linearize(frag))
                        goto err;
                frag = skb_unshare(frag, GFP_ATOMIC);
                if (unlikely(!frag))
                        goto err;
                head = *headbuf = frag;
                TIPC_SKB_CB(head)->tail = NULL;
                return 0;
        }

        if (!head)
                goto err;

        /* Either the input skb ownership is transferred to headskb
         * or the input skb is freed, clear the reference to avoid
         * bad access on error path.
         */
        *buf = NULL;
        if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
                kfree_skb_partial(frag, headstolen);
        } else {
                tail = TIPC_SKB_CB(head)->tail;
                if (!skb_has_frag_list(head))
                        skb_shinfo(head)->frag_list = frag;
                else
                        tail->next = frag;
                head->truesize += frag->truesize;
                head->data_len += frag->len;
                head->len += frag->len;
                TIPC_SKB_CB(head)->tail = frag;
        }

        if (fragid == LAST_FRAGMENT) {
                TIPC_SKB_CB(head)->validated = 0;
                if (unlikely(!tipc_msg_validate(&head)))
                        goto err;
                *buf = head;
                TIPC_SKB_CB(head)->tail = NULL;
                *headbuf = NULL;
                return 1;
        }
        return 0;
err:
        kfree_skb(*buf);
        kfree_skb(*headbuf);
        *buf = *headbuf = NULL;
        return 0;
}
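
/*
 * Illustrative reassembly loop (a sketch; tipc_msg_assemble() further down
 * follows the same pattern). next_fragment() is a hypothetical source of
 * fragment buffers.
 *
 *      struct sk_buff *head = NULL, *frag;
 *
 *      while ((frag = next_fragment())) {
 *              if (tipc_buf_append(&head, &frag))
 *                      return frag;    // reassembly complete
 *              if (!head)
 *                      return NULL;    // reassembly failed, buffers freed
 *      }
 */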

/**
 * tipc_msg_append(): Append data to tail of an existing buffer queue
 * @_hdr: header to be used
 * @m: the data to be appended
 * @dlen: size of data to be appended
 * @mss: max allowable size of buffer
 * @txq: queue to append to
 *
 * Return: the number of 1k blocks appended, or a negative errno on failure
 */
int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen,
                    int mss, struct sk_buff_head *txq)
{
        struct sk_buff *skb;
        int accounted, total, curr;
        int mlen, cpy, rem = dlen;
        struct tipc_msg *hdr;

        skb = skb_peek_tail(txq);
        accounted = skb ? msg_blocks(buf_msg(skb)) : 0;
        total = accounted;

        do {
                if (!skb || skb->len >= mss) {
                        skb = tipc_buf_acquire(mss, GFP_KERNEL);
                        if (unlikely(!skb))
                                return -ENOMEM;
                        skb_orphan(skb);
                        skb_trim(skb, MIN_H_SIZE);
                        hdr = buf_msg(skb);
                        skb_copy_to_linear_data(skb, _hdr, MIN_H_SIZE);
                        msg_set_hdr_sz(hdr, MIN_H_SIZE);
                        msg_set_size(hdr, MIN_H_SIZE);
                        __skb_queue_tail(txq, skb);
                        total += 1;
                }
                hdr = buf_msg(skb);
                curr = msg_blocks(hdr);
                mlen = msg_size(hdr);
                cpy = min_t(size_t, rem, mss - mlen);
                if (cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter))
                        return -EFAULT;
                msg_set_size(hdr, mlen + cpy);
                skb_put(skb, cpy);
                rem -= cpy;
                total += msg_blocks(hdr) - curr;
        } while (rem > 0);
        return total - accounted;
}
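
/*
 * Illustrative sketch (hypothetical stream-send caller): 'hdr' is the
 * socket's prepared header template, 'm' the user msghdr, 'send' the byte
 * count and 'maxnagle' the bundling limit; all four are placeholders here.
 *
 *      struct sk_buff_head txq;
 *      int blocks;
 *
 *      __skb_queue_head_init(&txq);
 *      blocks = tipc_msg_append(hdr, m, send, maxnagle, &txq);
 *      if (blocks < 0)
 *              return blocks;  // -ENOMEM or -EFAULT
 *      // account 'blocks' against the send window, then transmit txq
 */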

/* tipc_msg_validate - validate basic format of received message
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should. The routine also ensures
 * that the entire message header is stored in the main fragment of the message
 * buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 */
bool tipc_msg_validate(struct sk_buff **_skb)
{
        struct sk_buff *skb = *_skb;
        struct tipc_msg *hdr;
        int msz, hsz;

        /* Ensure that flow control ratio condition is satisfied */
        if (unlikely(skb->truesize / buf_roundup_len(skb) >= 4)) {
                skb = skb_copy_expand(skb, BUF_HEADROOM, 0, GFP_ATOMIC);
                if (!skb)
                        return false;
                kfree_skb(*_skb);
                *_skb = skb;
        }

        if (unlikely(TIPC_SKB_CB(skb)->validated))
                return true;

        if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
                return false;

        hsz = msg_hdr_sz(buf_msg(skb));
        if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE))
                return false;
        if (unlikely(!pskb_may_pull(skb, hsz)))
                return false;

        hdr = buf_msg(skb);
        if (unlikely(msg_version(hdr) != TIPC_VERSION))
                return false;

        msz = msg_size(hdr);
        if (unlikely(msz < hsz))
                return false;
        if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
                return false;
        if (unlikely(skb->len < msz))
                return false;

        TIPC_SKB_CB(skb)->validated = 1;
        return true;
}
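
/*
 * Illustrative sketch (hypothetical receive path): validate before trusting
 * any header field; note the skb pointer may be replaced on return.
 *
 *      if (unlikely(!tipc_msg_validate(&skb))) {
 *              kfree_skb(skb);
 *              return;
 *      }
 *      hdr = buf_msg(skb);     // header is now linear and sane
 */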

/**
 * tipc_msg_fragment - build a fragment skb list for TIPC message
 *
 * @skb: TIPC message skb
 * @hdr: internal msg header to be put on the top of the fragments
 * @pktmax: max size of a fragment incl. the header
 * @frags: returned fragment skb list
 *
 * Return: 0 if the fragmentation is successful, otherwise: -EINVAL
 * or -ENOMEM
 */
int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr,
                      int pktmax, struct sk_buff_head *frags)
{
        int pktno, nof_fragms, dsz, dmax, eat;
        struct tipc_msg *_hdr;
        struct sk_buff *_skb;
        u8 *data;

        /* Non-linear buffer? */
        if (skb_linearize(skb))
                return -ENOMEM;

        data = (u8 *)skb->data;
        dsz = msg_size(buf_msg(skb));
        dmax = pktmax - INT_H_SIZE;
        if (dsz <= dmax || !dmax)
                return -EINVAL;

        nof_fragms = dsz / dmax + 1;
        for (pktno = 1; pktno <= nof_fragms; pktno++) {
                if (pktno < nof_fragms)
                        eat = dmax;
                else
                        eat = dsz % dmax;
                /* Allocate a new fragment */
                _skb = tipc_buf_acquire(INT_H_SIZE + eat, GFP_ATOMIC);
                if (!_skb)
                        goto error;
                skb_orphan(_skb);
                __skb_queue_tail(frags, _skb);
                /* Copy header & data to the fragment */
                skb_copy_to_linear_data(_skb, hdr, INT_H_SIZE);
                skb_copy_to_linear_data_offset(_skb, INT_H_SIZE, data, eat);
                data += eat;
                /* Update the fragment's header */
                _hdr = buf_msg(_skb);
                msg_set_fragm_no(_hdr, pktno);
                msg_set_nof_fragms(_hdr, nof_fragms);
                msg_set_size(_hdr, INT_H_SIZE + eat);
        }
        return 0;

error:
        __skb_queue_purge(frags);
        __skb_queue_head_init(frags);
        return -ENOMEM;
}
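
/*
 * Illustrative sketch (assumption): splitting an oversized message for an
 * MTU-limited bearer; 'hdr' is a prepared INT_H_SIZE fragment header and
 * 'mtu' the bearer MTU, both placeholders.
 *
 *      struct sk_buff_head frags;
 *      int rc;
 *
 *      __skb_queue_head_init(&frags);
 *      rc = tipc_msg_fragment(skb, hdr, mtu, &frags);
 *      if (rc)
 *              return rc;      // -EINVAL or -ENOMEM, frags left empty
 */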

/**
 * tipc_msg_build - create buffer chain containing specified header and data
 * @mhdr: Message header, to be prepended to data
 * @m: User message
 * @offset: buffer offset for fragmented messages (FIXME)
 * @dsz: Total length of user data
 * @pktmax: Max packet size that can be used
 * @list: Buffer or chain of buffers to be returned to caller
 *
 * Note that the recursive call we are making here is safe, since it can
 * logically go only one further level down.
 *
 * Return: message data size or errno: -ENOMEM, -EFAULT
 */
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
                   int dsz, int pktmax, struct sk_buff_head *list)
{
        int mhsz = msg_hdr_sz(mhdr);
        struct tipc_msg pkthdr;
        int msz = mhsz + dsz;
        int pktrem = pktmax;
        struct sk_buff *skb;
        int drem = dsz;
        int pktno = 1;
        char *pktpos;
        int pktsz;
        int rc;

        msg_set_size(mhdr, msz);

        /* No fragmentation needed? */
        if (likely(msz <= pktmax)) {
                skb = tipc_buf_acquire(msz, GFP_KERNEL);

                /* Fall back to smaller MTU if node local message */
                if (unlikely(!skb)) {
                        if (pktmax != MAX_MSG_SIZE)
                                return -ENOMEM;
                        rc = tipc_msg_build(mhdr, m, offset, dsz,
                                            one_page_mtu, list);
                        if (rc != dsz)
                                return rc;
                        if (tipc_msg_assemble(list))
                                return dsz;
                        return -ENOMEM;
                }
                skb_orphan(skb);
                __skb_queue_tail(list, skb);
                skb_copy_to_linear_data(skb, mhdr, mhsz);
                pktpos = skb->data + mhsz;
                if (copy_from_iter_full(pktpos, dsz, &m->msg_iter))
                        return dsz;
                rc = -EFAULT;
                goto error;
        }

        /* Prepare reusable fragment header */
        tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER,
                      FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
        msg_set_size(&pkthdr, pktmax);
        msg_set_fragm_no(&pkthdr, pktno);
        msg_set_importance(&pkthdr, msg_importance(mhdr));

        /* Prepare first fragment */
        skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;
        skb_orphan(skb);
        __skb_queue_tail(list, skb);
        pktpos = skb->data;
        skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
        pktpos += INT_H_SIZE;
        pktrem -= INT_H_SIZE;
        skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
        pktpos += mhsz;
        pktrem -= mhsz;

        do {
                if (drem < pktrem)
                        pktrem = drem;

                if (!copy_from_iter_full(pktpos, pktrem, &m->msg_iter)) {
                        rc = -EFAULT;
                        goto error;
                }
                drem -= pktrem;

                if (!drem)
                        break;

                /* Prepare new fragment: */
                if (drem < (pktmax - INT_H_SIZE))
                        pktsz = drem + INT_H_SIZE;
                else
                        pktsz = pktmax;
                skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
                if (!skb) {
                        rc = -ENOMEM;
                        goto error;
                }
                skb_orphan(skb);
                __skb_queue_tail(list, skb);
                msg_set_type(&pkthdr, FRAGMENT);
                msg_set_size(&pkthdr, pktsz);
                msg_set_fragm_no(&pkthdr, ++pktno);
                skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
                pktpos = skb->data + INT_H_SIZE;
                pktrem = pktsz - INT_H_SIZE;

        } while (1);
        msg_set_type(buf_msg(skb), LAST_FRAGMENT);
        return dsz;
error:
        __skb_queue_purge(list);
        __skb_queue_head_init(list);
        return rc;
}
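
/*
 * Illustrative sketch (hypothetical send-path caller): build a ready-to-send
 * chain from a header template and user data; 'hdr', 'm', 'dlen' and 'mtu'
 * are placeholders.
 *
 *      struct sk_buff_head pkts;
 *      int rc;
 *
 *      __skb_queue_head_init(&pkts);
 *      rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
 *      if (rc != dlen)
 *              return rc;      // -ENOMEM or -EFAULT; pkts has been purged
 */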

/**
 * tipc_msg_bundle - Append contents of a buffer to tail of an existing one
 * @bskb: the bundle buffer to append to
 * @msg: message to be appended
 * @max: max allowable size for the bundle buffer
 *
 * Return: "true" if bundling has been performed, otherwise "false"
 */
static bool tipc_msg_bundle(struct sk_buff *bskb, struct tipc_msg *msg,
                            u32 max)
{
        struct tipc_msg *bmsg = buf_msg(bskb);
        u32 msz, bsz, offset, pad;

        msz = msg_size(msg);
        bsz = msg_size(bmsg);
        offset = BUF_ALIGN(bsz);
        pad = offset - bsz;

        if (unlikely(skb_tailroom(bskb) < (pad + msz)))
                return false;
        if (unlikely(max < (offset + msz)))
                return false;

        skb_put(bskb, pad + msz);
        skb_copy_to_linear_data_offset(bskb, offset, msg, msz);
        msg_set_size(bmsg, offset + msz);
        msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
        return true;
}

/**
 * tipc_msg_try_bundle - Try to bundle a new message to the last one
 * @tskb: the last/target message to which the new one will be appended
 * @skb: the new message skb pointer
 * @mss: max message size (header inclusive)
 * @dnode: destination node for the message
 * @new_bundle: if this call made a new bundle or not
 *
 * Return: "true" if the new message skb has bundling potential, now or
 * later; when bundling has been performed this time, the skb is consumed
 * and the skb pointer set to NULL.
 * Otherwise, "false" if the skb cannot be bundled at all.
 */
bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,
                         u32 dnode, bool *new_bundle)
{
        struct tipc_msg *msg, *inner, *outer;
        u32 tsz;

        /* First, check if the new buffer is suitable for bundling */
        msg = buf_msg(*skb);
        if (msg_user(msg) == MSG_FRAGMENTER)
                return false;
        if (msg_user(msg) == TUNNEL_PROTOCOL)
                return false;
        if (msg_user(msg) == BCAST_PROTOCOL)
                return false;
        if (mss <= INT_H_SIZE + msg_size(msg))
                return false;

        /* Ok, but is there a last/target buffer to bundle with? */
        if (unlikely(!tskb))
                return true;

        /* Is it a bundle already? Try to bundle the new message to it */
        if (msg_user(buf_msg(tskb)) == MSG_BUNDLER) {
                *new_bundle = false;
                goto bundle;
        }

        /* Make a new bundle of the two messages if possible */
        tsz = msg_size(buf_msg(tskb));
        if (unlikely(mss < BUF_ALIGN(INT_H_SIZE + tsz) + msg_size(msg)))
                return true;
        if (unlikely(pskb_expand_head(tskb, INT_H_SIZE, mss - tsz - INT_H_SIZE,
                                      GFP_ATOMIC)))
                return true;
        inner = buf_msg(tskb);
        skb_push(tskb, INT_H_SIZE);
        outer = buf_msg(tskb);
        tipc_msg_init(msg_prevnode(inner), outer, MSG_BUNDLER, 0, INT_H_SIZE,
                      dnode);
        msg_set_importance(outer, msg_importance(inner));
        msg_set_size(outer, INT_H_SIZE + tsz);
        msg_set_msgcnt(outer, 1);
        *new_bundle = true;

bundle:
        if (likely(tipc_msg_bundle(tskb, msg, mss))) {
                consume_skb(*skb);
                *skb = NULL;
        }
        return true;
}
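
/*
 * Illustrative sketch (assumption, modelled on a link transmit path):
 * 'backlogq', 'mss' and 'dnode' are placeholders.
 *
 *      bool new_bundle;
 *      struct sk_buff *tskb = skb_peek_tail(backlogq);
 *
 *      if (tipc_msg_try_bundle(tskb, &skb, mss, dnode, &new_bundle)) {
 *              if (!skb)
 *                      return;         // bundled into tskb and consumed
 *              __skb_queue_tail(backlogq, skb);  // candidate for later bundling
 *      }
 */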

/**
 * tipc_msg_extract(): extract bundled inner packet from buffer
 * @skb: buffer to be extracted from.
 * @iskb: extracted inner buffer, to be returned
 * @pos: position in outer message of msg to be extracted.
 *       Returns position of next msg.
 * Consumes outer buffer when last packet extracted
 * Return: true when there is an extracted buffer, otherwise false
 */
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
{
        struct tipc_msg *hdr, *ihdr;
        int imsz;

        *iskb = NULL;
        if (unlikely(skb_linearize(skb)))
                goto none;

        hdr = buf_msg(skb);
        if (unlikely(*pos > (msg_data_sz(hdr) - MIN_H_SIZE)))
                goto none;

        ihdr = (struct tipc_msg *)(msg_data(hdr) + *pos);
        imsz = msg_size(ihdr);

        if ((*pos + imsz) > msg_data_sz(hdr))
                goto none;

        *iskb = tipc_buf_acquire(imsz, GFP_ATOMIC);
        if (!*iskb)
                goto none;

        skb_copy_to_linear_data(*iskb, ihdr, imsz);
        if (unlikely(!tipc_msg_validate(iskb)))
                goto none;

        *pos += BUF_ALIGN(imsz);
        return true;
none:
        kfree_skb(skb);
        kfree_skb(*iskb);
        *iskb = NULL;
        return false;
}
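
/*
 * Illustrative unbundling loop (sketch of the receive-path pattern);
 * process() is a hypothetical per-message handler. The outer 'skb' is
 * consumed by tipc_msg_extract() once the last inner message is out.
 *
 *      int pos = 0;
 *      struct sk_buff *iskb;
 *
 *      while (tipc_msg_extract(skb, &iskb, &pos))
 *              process(iskb);
 */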

/**
 * tipc_msg_reverse(): swap source and destination addresses and add error code
 * @own_node: originating node id for reversed message
 * @skb: buffer containing message to be reversed; will be consumed
 * @err: error code to be set in message, if any
 * Replaces consumed buffer with new one when successful
 * Return: true if success, otherwise false
 */
bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
{
        struct sk_buff *_skb = *skb;
        struct tipc_msg *_hdr, *hdr;
        int hlen, dlen;

        if (skb_linearize(_skb))
                goto exit;
        _hdr = buf_msg(_skb);
        dlen = min_t(uint, msg_data_sz(_hdr), MAX_FORWARD_SIZE);
        hlen = msg_hdr_sz(_hdr);

        if (msg_dest_droppable(_hdr))
                goto exit;
        if (msg_errcode(_hdr))
                goto exit;

        /* Never return SHORT header */
        if (hlen == SHORT_H_SIZE)
                hlen = BASIC_H_SIZE;

        /* Don't return data along with SYN+; sender has a clone */
        if (msg_is_syn(_hdr) && err == TIPC_ERR_OVERLOAD)
                dlen = 0;

        /* Allocate new buffer to return */
        *skb = tipc_buf_acquire(hlen + dlen, GFP_ATOMIC);
        if (!*skb)
                goto exit;
        memcpy((*skb)->data, _skb->data, msg_hdr_sz(_hdr));
        memcpy((*skb)->data + hlen, msg_data(_hdr), dlen);

        /* Build reverse header in new buffer */
        hdr = buf_msg(*skb);
        msg_set_hdr_sz(hdr, hlen);
        msg_set_errcode(hdr, err);
        msg_set_non_seq(hdr, 0);
        msg_set_origport(hdr, msg_destport(_hdr));
        msg_set_destport(hdr, msg_origport(_hdr));
        msg_set_destnode(hdr, msg_prevnode(_hdr));
        msg_set_prevnode(hdr, own_node);
        msg_set_orignode(hdr, own_node);
        msg_set_size(hdr, hlen + dlen);
        skb_orphan(_skb);
        kfree_skb(_skb);
        return true;
exit:
        kfree_skb(_skb);
        *skb = NULL;
        return false;
}

bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy)
{
        struct sk_buff *skb, *_skb;

        skb_queue_walk(msg, skb) {
                _skb = skb_clone(skb, GFP_ATOMIC);
                if (!_skb) {
                        __skb_queue_purge(cpy);
                        pr_err_ratelimited("Failed to clone buffer chain\n");
                        return false;
                }
                __skb_queue_tail(cpy, _skb);
        }
        return true;
}

/**
 * tipc_msg_lookup_dest(): try to find new destination for named message
 * @net: pointer to associated network namespace
 * @skb: the buffer containing the message.
 * @err: error code to be used by caller if lookup fails
 * Does not consume buffer
 * Return: true if a destination is found, false otherwise
 */
bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
{
        struct tipc_msg *msg = buf_msg(skb);
        u32 scope = msg_lookup_scope(msg);
        u32 self = tipc_own_addr(net);
        u32 inst = msg_nameinst(msg);
        struct tipc_socket_addr sk;
        struct tipc_uaddr ua;

        if (!msg_isdata(msg))
                return false;
        if (!msg_named(msg))
                return false;
        if (msg_errcode(msg))
                return false;
        *err = TIPC_ERR_NO_NAME;
        if (skb_linearize(skb))
                return false;
        msg = buf_msg(skb);
        if (msg_reroute_cnt(msg))
                return false;
        tipc_uaddr(&ua, TIPC_SERVICE_RANGE, scope,
                   msg_nametype(msg), inst, inst);
        sk.node = tipc_scope2node(net, scope);
        if (!tipc_nametbl_lookup_anycast(net, &ua, &sk))
                return false;
        msg_incr_reroute_cnt(msg);
        if (sk.node != self)
                msg_set_prevnode(msg, self);
        msg_set_destnode(msg, sk.node);
        msg_set_destport(msg, sk.ref);
        *err = TIPC_OK;

        return true;
}
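
/*
 * Illustrative sketch (hypothetical caller): retry the name table before
 * bouncing an undeliverable named message; 'xmitq' is a placeholder queue.
 *
 *      int err = TIPC_OK;
 *
 *      if (tipc_msg_lookup_dest(net, skb, &err)) {
 *              // forward skb to the node/port just written into its header
 *      } else if (err) {
 *              tipc_skb_reject(net, err, skb, xmitq); // bounce with error
 *      } else {
 *              kfree_skb(skb);
 *      }
 */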

/* tipc_msg_assemble() - assemble chain of fragments into one message
 */
bool tipc_msg_assemble(struct sk_buff_head *list)
{
        struct sk_buff *skb, *tmp = NULL;

        if (skb_queue_len(list) == 1)
                return true;

        while ((skb = __skb_dequeue(list))) {
                skb->next = NULL;
                if (tipc_buf_append(&tmp, &skb)) {
                        __skb_queue_tail(list, skb);
                        return true;
                }
                if (!tmp)
                        break;
        }
        __skb_queue_purge(list);
        __skb_queue_head_init(list);
        pr_warn("Failed to assemble buffer\n");
        return false;
}

/* tipc_msg_reassemble() - clone a buffer chain of fragments and
 *                         reassemble the clones into one message
 */
bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq)
{
        struct sk_buff *skb, *_skb;
        struct sk_buff *frag = NULL;
        struct sk_buff *head = NULL;
        int hdr_len;

        /* Copy header if single buffer */
        if (skb_queue_len(list) == 1) {
                skb = skb_peek(list);
                hdr_len = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
                _skb = __pskb_copy(skb, hdr_len, GFP_ATOMIC);
                if (!_skb)
                        return false;
                __skb_queue_tail(rcvq, _skb);
                return true;
        }

        /* Clone all fragments and reassemble */
        skb_queue_walk(list, skb) {
                frag = skb_clone(skb, GFP_ATOMIC);
                if (!frag)
                        goto error;
                frag->next = NULL;
                if (tipc_buf_append(&head, &frag))
                        break;
                if (!head)
                        goto error;
        }
        __skb_queue_tail(rcvq, frag);
        return true;
error:
        pr_warn("Failed to clone local mcast rcv buffer\n");
        kfree_skb(head);
        return false;
}

bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
                        struct sk_buff_head *cpy)
{
        struct sk_buff *skb, *_skb;

        skb_queue_walk(msg, skb) {
                _skb = pskb_copy(skb, GFP_ATOMIC);
                if (!_skb) {
                        __skb_queue_purge(cpy);
                        return false;
                }
                msg_set_destnode(buf_msg(_skb), dst);
                __skb_queue_tail(cpy, _skb);
        }
        return true;
}

/* __tipc_skb_queue_sorted(): sort pkt into list according to sequence number
 * @list: list to be appended to
 * @seqno: sequence number of buffer to add
 * @skb: buffer to add
 * Return: true if the buffer was inserted, false if it was a duplicate
 * and has been freed
 */
bool __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
                             struct sk_buff *skb)
{
        struct sk_buff *_skb, *tmp;

        if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) {
                __skb_queue_head(list, skb);
                return true;
        }

        if (more(seqno, buf_seqno(skb_peek_tail(list)))) {
                __skb_queue_tail(list, skb);
                return true;
        }

        skb_queue_walk_safe(list, _skb, tmp) {
                if (more(seqno, buf_seqno(_skb)))
                        continue;
                if (seqno == buf_seqno(_skb))
                        break;
                __skb_queue_before(list, _skb, skb);
                return true;
        }
        kfree_skb(skb);
        return false;
}
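
/*
 * Illustrative sketch (hypothetical deferred queue): insert an out-of-order
 * packet, dropping duplicates; 'defq' is a placeholder list head.
 *
 *      u16 seqno = buf_seqno(skb);
 *
 *      if (!__tipc_skb_queue_sorted(defq, seqno, skb))
 *              pr_debug("duplicate seqno %u dropped\n", seqno);
 */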

void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb,
                     struct sk_buff_head *xmitq)
{
        if (tipc_msg_reverse(tipc_own_addr(net), &skb, err))
                __skb_queue_tail(xmitq, skb);
}