xsk: proper AF_XDP socket teardown ordering
author Björn Töpel <bjorn.topel@intel.com>
Fri, 5 Oct 2018 11:25:15 +0000 (13:25 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 24 Nov 2019 07:20:32 +0000 (08:20 +0100)
[ Upstream commit 541d7fdd7694560404c502f64298a90ffe017e6b ]

The AF_XDP socket struct can exist in three different, implicit
states: setup, bound and released. Setup is before the socket has been
bound to a device. Bound is when the socket is active for receive and
send. Released is when the process/userspace side of the socket is
released, but the sock object is still lingering, e.g. when there is a
reference to the socket in an XSKMAP after process termination.

The Rx fast-path code uses the "dev" member of struct xdp_sock to
check whether a socket is bound or released, and the Tx code uses the
struct xdp_umem "xsk_list" member in conjunction with "dev" to
determine the state of a socket.
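
For illustration only (this is not the kernel source; the types and
helpers below, such as fake_xdp_sock and rx_check(), are invented for
the sketch), the bound/released distinction the Rx path relies on is
essentially a NULL check on "dev":

/* Standalone sketch: a socket is treated as bound only while its
 * "dev" member is non-NULL, so a released socket is rejected early. */
#include <stddef.h>
#include <stdio.h>

struct fake_dev { int ifindex; };

struct fake_xdp_sock {
	struct fake_dev *dev;		/* non-NULL only while bound */
	unsigned int queue_id;
};

/* Accept traffic only for a socket bound to this device and queue. */
static int rx_check(const struct fake_xdp_sock *xs,
		    const struct fake_dev *rx_dev, unsigned int rx_queue)
{
	if (!xs->dev || xs->dev != rx_dev || xs->queue_id != rx_queue)
		return -1;
	return 0;
}

int main(void)
{
	struct fake_dev eth0 = { .ifindex = 2 };
	struct fake_xdp_sock xs = { .dev = &eth0, .queue_id = 0 };

	printf("bound:    %d\n", rx_check(&xs, &eth0, 0));  /* 0, accepted */
	xs.dev = NULL;                                      /* "released" */
	printf("released: %d\n", rx_check(&xs, &eth0, 0));  /* -1, rejected */
	return 0;
}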

However, the transition from bound to released did not tear the socket
down in the correct order.

On the Rx side, "dev" was cleared after synchronize_net(), making the
synchronization useless. On the Tx side, the internal queues were
destroyed prior to removing them from the "xsk_list".

This commit corrects the cleanup order, and by doing so
xdp_del_sk_umem() can be simplified and one synchronize_net() can be
removed.
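
The resulting order follows the usual publish/unpublish pattern: first
make the socket unreachable (remove it from the umem's "xsk_list" and
clear "dev"), then wait for in-flight readers via synchronize_net(), and
only then free the queues those readers might still touch. A rough
standalone sketch of that ordering (not kernel code; wait_for_readers()
and remove_from_umem_list() merely stand in for synchronize_net() and
list_del_rcu()):

#include <stdlib.h>

struct queue { void *ring; };

struct sock_state {
	struct queue *rx;
	struct queue *tx;
	void *dev;			/* non-NULL while bound */
};

/* Placeholder for synchronize_net(): wait for concurrent readers. */
static void wait_for_readers(void) { }

/* Placeholder for list_del_rcu() on the umem's socket list. */
static void remove_from_umem_list(struct sock_state *xs) { (void)xs; }

static void teardown(struct sock_state *xs)
{
	if (xs->dev) {
		/* 1. Unpublish: readers can no longer find the socket. */
		remove_from_umem_list(xs);
		xs->dev = NULL;

		/* 2. Wait until readers that may still see the old state
		 *    have finished. */
		wait_for_readers();
	}

	/* 3. Only now is it safe to free what readers were using. */
	free(xs->rx);
	free(xs->tx);
	xs->rx = NULL;
	xs->tx = NULL;
}

int main(void)
{
	struct sock_state xs = {
		.rx = calloc(1, sizeof(struct queue)),
		.tx = calloc(1, sizeof(struct queue)),
		.dev = (void *)&xs,	/* pretend the socket is bound */
	};

	teardown(&xs);
	return 0;
}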

Fixes: 965a99098443 ("xsk: add support for bind for Rx")
Fixes: ac98d8aab61b ("xsk: wire upp Tx zero-copy functions")
Reported-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Acked-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
net/xdp/xdp_umem.c
net/xdp/xsk.c

index 8cab91c482ff5f09f381cdf93318947a9a426170..d9117ab035f7c5dea2455d88f3820be84de2fb99 100644 (file)
@@ -32,14 +32,9 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
 {
        unsigned long flags;
 
-       if (xs->dev) {
-               spin_lock_irqsave(&umem->xsk_list_lock, flags);
-               list_del_rcu(&xs->list);
-               spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
-
-               if (umem->zc)
-                       synchronize_net();
-       }
+       spin_lock_irqsave(&umem->xsk_list_lock, flags);
+       list_del_rcu(&xs->list);
+       spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
 }
 
 int xdp_umem_query(struct net_device *dev, u16 queue_id)
index 661504042d3040dd01cf3ef56955d6f93b467e76..ff15207036dc5ca27430db98e8f704fab422d56c 100644 (file)
@@ -343,12 +343,18 @@ static int xsk_release(struct socket *sock)
        local_bh_enable();
 
        if (xs->dev) {
+               struct net_device *dev = xs->dev;
+
                /* Wait for driver to stop using the xdp socket. */
-               synchronize_net();
-               dev_put(xs->dev);
+               xdp_del_sk_umem(xs->umem, xs);
                xs->dev = NULL;
+               synchronize_net();
+               dev_put(dev);
        }
 
+       xskq_destroy(xs->rx);
+       xskq_destroy(xs->tx);
+
        sock_orphan(sk);
        sock->sk = NULL;
 
@@ -707,9 +713,6 @@ static void xsk_destruct(struct sock *sk)
        if (!sock_flag(sk, SOCK_DEAD))
                return;
 
-       xskq_destroy(xs->rx);
-       xskq_destroy(xs->tx);
-       xdp_del_sk_umem(xs->umem, xs);
        xdp_put_umem(xs->umem);
 
        sk_refcnt_debug_dec(sk);