udp: Fix rcv socket locking
Author:     Herbert Xu <herbert@gondor.apana.org.au>
AuthorDate: Mon, 15 Sep 2008 18:48:46 +0000 (11:48 -0700)
Commit:     Greg Kroah-Hartman <gregkh@suse.de>
CommitDate: Thu, 9 Oct 2008 02:44:45 +0000 (19:44 -0700)
[ Upstream commits d97106ea52aa57e63ff40d04479016836bbb5a4e and
   93821778def10ec1e69aa3ac10adee975dad4ff3 ]

The previous patch, written in response to the recursive socket
locking seen on IPsec reception, is broken: it tries to drop the
BH socket lock while in user context.

This patch fixes it by shrinking the section protected by the
socket lock to sock_queue_rcv_skb only.  The only reason we added
the lock is for the accounting which happens in that function.
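
To see why pointing backlog_rcv at the inner helper breaks the
recursion, here is a minimal user-space sketch of the pattern (a toy
model, not kernel code: the pthread mutex, the owned_by_user flag and
release_sock_model() are stand-ins for bh_lock_sock(),
sock_owned_by_user() and release_sock()):

/* Toy model of the narrowed critical section and the split between
 * udp_queue_rcv_skb() and __udp_queue_rcv_skb(). */
#include <stdio.h>
#include <pthread.h>

struct sk_buff { int id; struct sk_buff *next; };

struct sock {
	pthread_mutex_t lock;		/* stands in for the BH socket lock   */
	int owned_by_user;		/* stands in for sock_owned_by_user() */
	struct sk_buff *backlog_head, *backlog_tail;
	int (*backlog_rcv)(struct sock *sk, struct sk_buff *skb);
};

/* Inner helper: queueing and accounting only, no locking of its own. */
static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	(void)sk;
	printf("skb %d queued to receive queue\n", skb->id);
	return 0;
}

static void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	skb->next = NULL;
	if (sk->backlog_tail)
		sk->backlog_tail->next = skb;
	else
		sk->backlog_head = skb;
	sk->backlog_tail = skb;
	printf("skb %d deferred to backlog\n", skb->id);
}

/* Outer entry point (softirq context): the lock is held only around
 * the queue-or-backlog decision -- the narrowed critical section. */
static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc = 0;

	pthread_mutex_lock(&sk->lock);
	if (!sk->owned_by_user)
		rc = __udp_queue_rcv_skb(sk, skb);
	else
		sk_add_backlog(sk, skb);
	pthread_mutex_unlock(&sk->lock);
	return rc;
}

/* User context releasing the socket drains the backlog through
 * ->backlog_rcv; pointing that at the inner helper means backlog
 * processing never re-enters the locked outer path. */
static void release_sock_model(struct sock *sk)
{
	pthread_mutex_lock(&sk->lock);
	sk->owned_by_user = 0;
	while (sk->backlog_head) {
		struct sk_buff *skb = sk->backlog_head;
		sk->backlog_head = skb->next;
		sk->backlog_rcv(sk, skb);
	}
	sk->backlog_tail = NULL;
	pthread_mutex_unlock(&sk->lock);
}

int main(void)
{
	struct sock sk = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.owned_by_user = 1,
		.backlog_rcv = __udp_queue_rcv_skb,	/* the key change */
	};
	struct sk_buff a = { .id = 1 }, b = { .id = 2 };

	udp_queue_rcv_skb(&sk, &a);	/* socket busy: deferred    */
	release_sock_model(&sk);	/* drained on release       */
	udp_queue_rcv_skb(&sk, &b);	/* socket free: direct path */
	return 0;
}

Running the model defers skb 1 to the backlog while the socket is
owned, drains it on release, and queues skb 2 directly; because the
backlog is drained via the inner helper, the locked outer path is
never re-entered.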

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
net/ipv4/udp.c
net/ipv6/udp.c

diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 9703c870f1dfa00f88a4d0c6c24c7adfca9f0332..b924502b5e1e6bb67a73476818fe5066e96ca737 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -956,6 +956,27 @@ int udp_disconnect(struct sock *sk, int flags)
        return 0;
 }
 
+static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+       int is_udplite = IS_UDPLITE(sk);
+       int rc;
+
+       if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
+               /* Note that an ENOMEM error is charged twice */
+               if (rc == -ENOMEM)
+                       UDP_INC_STATS_BH(UDP_MIB_RCVBUFERRORS,
+                                        is_udplite);
+               goto drop;
+       }
+
+       return 0;
+
+drop:
+       UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite);
+       kfree_skb(skb);
+       return -1;
+}
+
 /* returns:
  *  -1: error
  *   0: success
@@ -1046,14 +1067,16 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
                        goto drop;
        }
 
-       if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) {
-               /* Note that an ENOMEM error is charged twice */
-               if (rc == -ENOMEM)
-                       UDP_INC_STATS_BH(UDP_MIB_RCVBUFERRORS, is_udplite);
-               goto drop;
-       }
+       rc = 0;
 
-       return 0;
+       bh_lock_sock(sk);
+       if (!sock_owned_by_user(sk))
+               rc = __udp_queue_rcv_skb(sk, skb);
+       else
+               sk_add_backlog(sk, skb);
+       bh_unlock_sock(sk);
+
+       return rc;
 
 drop:
        UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite);
@@ -1091,15 +1114,7 @@ static int __udp4_lib_mcast_deliver(struct sk_buff *skb,
                                skb1 = skb_clone(skb, GFP_ATOMIC);
 
                        if (skb1) {
-                               int ret = 0;
-
-                               bh_lock_sock_nested(sk);
-                               if (!sock_owned_by_user(sk))
-                                       ret = udp_queue_rcv_skb(sk, skb1);
-                               else
-                                       sk_add_backlog(sk, skb1);
-                               bh_unlock_sock(sk);
-
+                               int ret = udp_queue_rcv_skb(sk, skb1);
                                if (ret > 0)
                                        /* we should probably re-process instead
                                         * of dropping packets here. */
@@ -1192,13 +1207,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
                        uh->dest, inet_iif(skb), udptable);
 
        if (sk != NULL) {
-               int ret = 0;
-               bh_lock_sock_nested(sk);
-               if (!sock_owned_by_user(sk))
-                       ret = udp_queue_rcv_skb(sk, skb);
-               else
-                       sk_add_backlog(sk, skb);
-               bh_unlock_sock(sk);
+               int ret = udp_queue_rcv_skb(sk, skb);
                sock_put(sk);
 
                /* a return value > 0 means to resubmit the input, but
@@ -1493,7 +1502,7 @@ struct proto udp_prot = {
        .sendmsg           = udp_sendmsg,
        .recvmsg           = udp_recvmsg,
        .sendpage          = udp_sendpage,
-       .backlog_rcv       = udp_queue_rcv_skb,
+       .backlog_rcv       = __udp_queue_rcv_skb,
        .hash              = udp_lib_hash,
        .unhash            = udp_lib_unhash,
        .get_port          = udp_v4_get_port,
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 53739de829db07eb8ad1c3b9ab2b25d4e2b162c5..4e36c57dc24297d1138d617296598fe81622818b 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -373,7 +373,7 @@ static int __udp6_lib_mcast_deliver(struct sk_buff *skb, struct in6_addr *saddr,
                                        uh->source, saddr, dif))) {
                struct sk_buff *buff = skb_clone(skb, GFP_ATOMIC);
                if (buff) {
-                       bh_lock_sock_nested(sk2);
+                       bh_lock_sock(sk2);
                        if (!sock_owned_by_user(sk2))
                                udpv6_queue_rcv_skb(sk2, buff);
                        else
@@ -381,7 +381,7 @@ static int __udp6_lib_mcast_deliver(struct sk_buff *skb, struct in6_addr *saddr,
                        bh_unlock_sock(sk2);
                }
        }
-       bh_lock_sock_nested(sk);
+       bh_lock_sock(sk);
        if (!sock_owned_by_user(sk))
                udpv6_queue_rcv_skb(sk, skb);
        else
@@ -499,7 +499,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
 
        /* deliver */
 
-       bh_lock_sock_nested(sk);
+       bh_lock_sock(sk);
        if (!sock_owned_by_user(sk))
                udpv6_queue_rcv_skb(sk, skb);
        else