]> git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
net: devmem: use READ_ONCE/WRITE_ONCE on binding->dev
authorBobby Eshleman <bobbyeshleman@meta.com>
Tue, 3 Mar 2026 00:32:56 +0000 (16:32 -0800)
committerJakub Kicinski <kuba@kernel.org>
Thu, 5 Mar 2026 01:59:27 +0000 (17:59 -0800)
binding->dev is protected on the write-side in
mp_dmabuf_devmem_uninstall() against concurrent writes, but due to the
concurrent bare reads in net_devmem_get_binding() and
validate_xmit_unreadable_skb() it should be wrapped in a
READ_ONCE/WRITE_ONCE pair to make sure no compiler optimizations play
with the underlying register in unforeseen ways.

This doesn't present a critical bug because the known compiler
optimizations don't result in bad behavior. There is no tearing on u64,
and load omissions/invented loads would only break if additional
binding->dev references were inlined together (they aren't right now).

This just more strictly follows the Linux kernel memory model (i.e.,
"Lock-Protected Writes With Lockless Reads" in
tools/memory-model/Documentation/access-marking.txt).

Fixes: bd61848900bf ("net: devmem: Implement TX path")
Signed-off-by: Bobby Eshleman <bobbyeshleman@meta.com>
Link: https://patch.msgid.link/20260302-devmem-membar-fix-v2-1-5b33c9cbc28b@meta.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
net/core/dev.c
net/core/devmem.c

index 4af4cf2d63a47c2038ab082636f5f75f1e86e770..20610a192ec7a1d23257797056e8e6e3132bab1b 100644 (file)
@@ -3987,7 +3987,7 @@ static struct sk_buff *validate_xmit_unreadable_skb(struct sk_buff *skb,
        if (shinfo->nr_frags > 0) {
                niov = netmem_to_net_iov(skb_frag_netmem(&shinfo->frags[0]));
                if (net_is_devmem_iov(niov) &&
-                   net_devmem_iov_binding(niov)->dev != dev)
+                   READ_ONCE(net_devmem_iov_binding(niov)->dev) != dev)
                        goto out_free;
        }
 
index 8c9aad776bf43f9979692429982687226ff12612..69d79aee07ef6f25c6f14415e801b8e00a7e5b94 100644 (file)
@@ -396,7 +396,8 @@ struct net_devmem_dmabuf_binding *net_devmem_get_binding(struct sock *sk,
         * net_device.
         */
        dst_dev = dst_dev_rcu(dst);
-       if (unlikely(!dst_dev) || unlikely(dst_dev != binding->dev)) {
+       if (unlikely(!dst_dev) ||
+           unlikely(dst_dev != READ_ONCE(binding->dev))) {
                err = -ENODEV;
                goto out_unlock;
        }
@@ -513,7 +514,8 @@ static void mp_dmabuf_devmem_uninstall(void *mp_priv,
                        xa_erase(&binding->bound_rxqs, xa_idx);
                        if (xa_empty(&binding->bound_rxqs)) {
                                mutex_lock(&binding->lock);
-                               binding->dev = NULL;
+                               ASSERT_EXCLUSIVE_WRITER(binding->dev);
+                               WRITE_ONCE(binding->dev, NULL);
                                mutex_unlock(&binding->lock);
                        }
                        break;