RDMA/siw: Fix the sendmsg byte count in siw_tcp_sendpages
author     Pedro Falcato <pfalcato@suse.de>
           Tue, 29 Jul 2025 12:03:48 +0000 (13:03 +0100)
committer  Jason Gunthorpe <jgg@nvidia.com>
           Tue, 5 Aug 2025 14:33:10 +0000 (11:33 -0300)
Ever since commit c2ff29e99a76 ("siw: Inline do_tcp_sendpages()"),
we have been doing this:

static int siw_tcp_sendpages(struct socket *s, struct page **page, int offset,
                             size_t size)
[...]
        /* Calculate the number of bytes we need to push, for this page
         * specifically */
        size_t bytes = min_t(size_t, PAGE_SIZE - offset, size);
        /* If we can't splice it, then copy it in, as normal */
        if (!sendpage_ok(page[i]))
                msg.msg_flags &= ~MSG_SPLICE_PAGES;
        /* Set the bvec pointing to the page, with len $bytes */
        bvec_set_page(&bvec, page[i], bytes, offset);
        /* Set the iter to $size, aka the size of the whole sendpages (!!!) */
        iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
try_page_again:
        lock_sock(sk);
        /* Sendmsg with $size size (!!!) */
        rv = tcp_sendmsg_locked(sk, &msg, size);

This means we've been setting up oversized iov_iters and issuing
oversized tcp_sendmsg calls for a while. This has been a benign bug
because sendpage_ok() always returned true. With the recent slab
allocator changes being slowly introduced into next (that disallow
sendpage on large kmalloc allocations), we have recently hit
out-of-bounds crashes, due to slight differences in iov_iter behavior
between the MSG_SPLICE_PAGES and "regular" copy paths:

(MSG_SPLICE_PAGES)
skb_splice_from_iter
  iov_iter_extract_pages
    iov_iter_extract_bvec_pages
      uses i->nr_segs to correctly stop in its tracks before OoB'ing everywhere
  skb_splice_from_iter gets a "short" read

(!MSG_SPLICE_PAGES)
skb_copy_to_page_nocache copy=iov_iter_count
 [...]
   copy_from_iter
        /* this doesn't help */
        if (unlikely(iter->count < len))
                len = iter->count;
          iterate_bvec
            ... and we run off the bvecs
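
To make the mismatch concrete, here is a minimal userspace model of the
problem (all names are hypothetical stand-ins, not kernel code): a
single-segment bvec-style iterator whose byte count is larger than the
segment. A consumer bounded by nr_segs stops short, while a consumer
that trusts the count would run past the segment:

#include <stdio.h>
#include <stddef.h>

struct seg  { const char *base; size_t len; };
struct iter { const struct seg *segs; size_t nr_segs; size_t count; };

/* Modeled after iov_iter_extract_bvec_pages(): bounded by nr_segs,
 * so an oversized count only produces a short extraction. */
static size_t splice_like(const struct iter *it)
{
	size_t got = 0;

	for (size_t i = 0; i < it->nr_segs && got < it->count; i++)
		got += it->segs[i].len;
	return got < it->count ? got : it->count;
}

/* Modeled after a copy path keyed on iov_iter_count(): trusts count,
 * so it would walk past the last segment (reported, not dereferenced). */
static size_t copy_like(const struct iter *it, size_t *overrun)
{
	size_t have = splice_like(it); /* bytes actually backed by segs */

	*overrun = it->count > have ? it->count - have : 0;
	return it->count;
}

int main(void)
{
	char page[4096];
	struct seg s = { page, 1024 };    /* bytes = 1024 */
	struct iter it = { &s, 1, 8192 }; /* count = size = 8192 (!!!) */
	size_t overrun;

	printf("splice-like path sends %zu bytes (short, but safe)\n",
	       splice_like(&it));
	copy_like(&it, &overrun);
	printf("copy-like path would read %zu bytes past the bvec\n",
	       overrun);
	return 0;
}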

Fix this by properly setting the iov_iter's byte count, plus sending the
correct byte count to tcp_sendmsg_locked. The manual offset bump in the
retry path is dropped as well: the iterator itself tracks progress
across partial sends.

Link: https://patch.msgid.link/r/20250729120348.495568-1-pfalcato@suse.de
Cc: stable@vger.kernel.org
Fixes: c2ff29e99a76 ("siw: Inline do_tcp_sendpages()")
Reported-by: kernel test robot <oliver.sang@intel.com>
Closes: https://lore.kernel.org/oe-lkp/202507220801.50a7210-lkp@intel.com
Reviewed-by: David Howells <dhowells@redhat.com>
Signed-off-by: Pedro Falcato <pfalcato@suse.de>
Acked-by: Bernard Metzler <bernard.metzler@linux.dev>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/sw/siw/siw_qp_tx.c

index 3a08f57d22113be624a8848ef20a5d4d516a1d1b..f7dd32c6e5ba4db1b84880211adaf760725055a8 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -340,18 +340,17 @@ static int siw_tcp_sendpages(struct socket *s, struct page **page, int offset,
                if (!sendpage_ok(page[i]))
                        msg.msg_flags &= ~MSG_SPLICE_PAGES;
                bvec_set_page(&bvec, page[i], bytes, offset);
-               iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
+               iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, bytes);
 
 try_page_again:
                lock_sock(sk);
-               rv = tcp_sendmsg_locked(sk, &msg, size);
+               rv = tcp_sendmsg_locked(sk, &msg, bytes);
                release_sock(sk);
 
                if (rv > 0) {
                        size -= rv;
                        sent += rv;
                        if (rv != bytes) {
-                               offset += rv;
                                bytes -= rv;
                                goto try_page_again;
                        }
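
As an editorial illustration of the fixed flow, here is a minimal
userspace sketch (hypothetical names, not the driver code): a fake
tcp_sendmsg_locked() performs partial sends and advances the iterator
itself, which is why the retry path only shrinks bytes and needs no
offset bump:

#include <stdio.h>
#include <stddef.h>
#include <assert.h>

struct fake_iter { size_t pos, limit; };

/* Stand-in for tcp_sendmsg_locked(): sends at most 300 bytes per call
 * and advances the iterator itself, like msg.msg_iter in the kernel. */
static size_t fake_sendmsg(struct fake_iter *it, size_t bytes)
{
	size_t rv = bytes < 300 ? bytes : 300; /* partial send */

	assert(it->pos + rv <= it->limit);     /* never runs off the bvec */
	it->pos += rv;                         /* iterator self-advances */
	return rv;
}

int main(void)
{
	size_t size = 1024, sent = 0;

	while (size) {
		size_t bytes = size < 4096 ? size : 4096; /* one page's worth */
		struct fake_iter it = { 0, bytes };       /* count = bytes, not size */

		while (bytes) {                /* try_page_again */
			size_t rv = fake_sendmsg(&it, bytes);

			size -= rv;
			sent += rv;
			bytes -= rv;           /* no offset bump needed */
		}
	}
	printf("sent %zu bytes\n", sent);
	return 0;
}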