tcp: avoid integer overflows in tcp_rcv_space_adjust()
author Eric Dumazet <edumazet@google.com>
Mon, 11 Dec 2017 01:55:03 +0000 (17:55 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 5 Jun 2018 09:41:58 +0000 (11:41 +0200)
commit 607065bad9931e72207b0cac365d7d4abc06bd99 upstream.

When using large tcp_rmem[2] values (I did tests with 500 MB),
I noticed overflows while computing rcvwin.

Let's fix this before the following patch.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Acked-by: Wei Wang <weiwan@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
[Backport: sysctl_tcp_rmem is not Namespace-ify'd in older kernels]
Signed-off-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
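
For context, the overflow the patch addresses comes from doing the rcvwin
arithmetic in a 32-bit signed int: with tcp_rmem[2] around 500 MB, the
slow-start branch can push rcvwin towards 4 * copied, and scaling that by
rcvmem / advmss exceeds INT_MAX. A minimal userspace sketch of the
arithmetic (not the kernel function itself; the advmss and rcvmem values
are assumptions picked only to illustrate the 500 MB case):

	#include <limits.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int64_t copied = 500LL * 1024 * 1024; /* bytes copied to user in one RTT */
		int64_t advmss = 1448;                /* assumed advertised MSS */
		int64_t rcvmem = 2304;                /* assumed per-segment skb cost */

		/* Base window, then the slow-start branch doubles it (~4 * copied). */
		int64_t rcvwin = (copied << 1) + 16 * advmss;
		rcvwin <<= 1;

		/* Scale the window into a socket buffer size. */
		int64_t rcvbuf = rcvwin / advmss * rcvmem;

		printf("rcvwin = %lld, rcvbuf = %lld, INT_MAX = %d\n",
		       (long long)rcvwin, (long long)rcvbuf, INT_MAX);
		printf("rcvbuf fits in a 32-bit int? %s\n",
		       rcvbuf > INT_MAX ? "no" : "yes");
		return 0;
	}

With these assumed numbers rcvbuf comes out around 3.3 GB, well past
INT_MAX, so the old int-based computation would wrap; hence the switch to a
u64 rcvwin and the min_t(u64, ...) clamp in the diff below.
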
include/linux/tcp.h
net/ipv4/tcp_input.c

diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index e8418fc77a43f9baf676932c26b869ed96e16688..fe322fa611e663e50a67f5153f7a1d683e573d34 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -334,7 +334,7 @@ struct tcp_sock {
 
 /* Receiver queue space */
        struct {
-               int     space;
+               u32     space;
                u32     seq;
                u64     time;
        } rcvq_space;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index ebbb54bcbcacd410451d968c5f9d2b32566d41d1..125b49c166a42ef949083f54a33d78ad0cab3a94 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -591,8 +591,8 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
 void tcp_rcv_space_adjust(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
+       u32 copied;
        int time;
-       int copied;
 
        tcp_mstamp_refresh(tp);
        time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
@@ -615,12 +615,13 @@ void tcp_rcv_space_adjust(struct sock *sk)
 
        if (sysctl_tcp_moderate_rcvbuf &&
            !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
-               int rcvwin, rcvmem, rcvbuf;
+               int rcvmem, rcvbuf;
+               u64 rcvwin;
 
                /* minimal window to cope with packet losses, assuming
                 * steady state. Add some cushion because of small variations.
                 */
-               rcvwin = (copied << 1) + 16 * tp->advmss;
+               rcvwin = ((u64)copied << 1) + 16 * tp->advmss;
 
                /* If rate increased by 25%,
                 *      assume slow start, rcvwin = 3 * copied
@@ -640,7 +641,8 @@ void tcp_rcv_space_adjust(struct sock *sk)
                while (tcp_win_from_space(rcvmem) < tp->advmss)
                        rcvmem += 128;
 
-               rcvbuf = min(rcvwin / tp->advmss * rcvmem, sysctl_tcp_rmem[2]);
+               do_div(rcvwin, tp->advmss);
+               rcvbuf = min_t(u64, rcvwin * rcvmem, sysctl_tcp_rmem[2]);
                if (rcvbuf > sk->sk_rcvbuf) {
                        sk->sk_rcvbuf = rcvbuf;
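
Note on the do_div() call in the last hunk: the macro divides its 64-bit
first argument in place (the quotient is left in rcvwin) and yields the
remainder, so the subsequent rcvwin * rcvmem multiplication also happens in
64 bits before min_t() clamps the result to sysctl_tcp_rmem[2]. A rough
plain-C stand-in for that contract, with assumed values (not the kernel's
optimised macro):

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative stand-in for do_div(): quotient stays in *n, remainder is returned. */
	static uint32_t do_div_sketch(uint64_t *n, uint32_t base)
	{
		uint32_t rem = (uint32_t)(*n % base);

		*n /= base;
		return rem;
	}

	int main(void)
	{
		uint64_t rcvwin = 4ULL * 500 * 1024 * 1024; /* assumed window, ~2 GB */
		uint32_t advmss = 1448;                     /* assumed MSS */
		uint64_t rcvmem = 2304;                     /* assumed per-skb cost */

		do_div_sketch(&rcvwin, advmss);
		printf("rcvwin / advmss = %llu, rcvwin * rcvmem = %llu\n",
		       (unsigned long long)rcvwin,
		       (unsigned long long)(rcvwin * rcvmem));
		return 0;
	}
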