mptcp: Use nested-BH locking for mptcp_delegated_actions
author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>
           Mon, 12 May 2025 09:27:33 +0000 (11:27 +0200)
committer  Paolo Abeni <pabeni@redhat.com>
           Thu, 15 May 2025 13:23:31 +0000 (15:23 +0200)
mptcp_delegated_actions is a per-CPU variable and relies on disabled BH for its
locking. Without per-CPU locking in local_bh_disable() on PREEMPT_RT this data
structure requires explicit locking.

Add a local_lock_t to the data structure and use local_lock_nested_bh() for
locking. This change adds only lockdep coverage and does not alter the
functional behaviour for !PREEMPT_RT.
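
For readers unfamiliar with the pattern: the conversion applied here (and in
the rest of this series) generally looks like the sketch below. The names
(my_stats, my_stats_update) are hypothetical, purely for illustration; only
the local_lock API calls mirror what the patch does.

	#include <linux/local_lock.h>
	#include <linux/percpu.h>
	#include <linux/types.h>

	/* Hypothetical per-CPU structure; previously the implicit
	 * BH-disabled section was its only protection.
	 */
	struct my_stats {
		local_lock_t	bh_lock;
		u64		counter;
	};

	static DEFINE_PER_CPU(struct my_stats, my_stats) = {
		.bh_lock = INIT_LOCAL_LOCK(bh_lock),
	};

	/* Caller runs in BH context, same as before the conversion. */
	static void my_stats_update(void)
	{
		struct my_stats *stats;

		/* A no-op apart from lockdep annotations on !PREEMPT_RT;
		 * acquires the per-CPU lock on PREEMPT_RT, where
		 * local_bh_disable() no longer provides per-CPU exclusion.
		 */
		local_lock_nested_bh(&my_stats.bh_lock);
		stats = this_cpu_ptr(&my_stats);
		stats->counter++;
		local_unlock_nested_bh(&my_stats.bh_lock);
	}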

Cc: Matthieu Baerts <matttbe@kernel.org>
Cc: Mat Martineau <martineau@kernel.org>
Cc: Geliang Tang <geliang@kernel.org>
Cc: mptcp@lists.linux.dev
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://patch.msgid.link/20250512092736.229935-13-bigeasy@linutronix.de
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
net/mptcp/protocol.c
net/mptcp/protocol.h

diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index c4fd558307f2091d4327e078cdcee9a6b81cf2df..0749733ea897bd0f0ec6f3165515166f145ae5ed 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -46,7 +46,9 @@ static struct percpu_counter mptcp_sockets_allocated ____cacheline_aligned_in_sm
 static void __mptcp_destroy_sock(struct sock *sk);
 static void mptcp_check_send_data_fin(struct sock *sk);
 
-DEFINE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);
+DEFINE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions) = {
+       .bh_lock = INIT_LOCAL_LOCK(bh_lock),
+};
 static struct net_device *mptcp_napi_dev;
 
 /* Returns end sequence number of the receiver's advertised window */
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 7aa38d74fef6b5f00d97a114d74b711014d0a52d..3dd11dd3ba16e8c1d3741b6eb5b526bb4beae15b 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -479,6 +479,7 @@ mptcp_subflow_rsk(const struct request_sock *rsk)
 
 struct mptcp_delegated_action {
        struct napi_struct napi;
+       local_lock_t bh_lock;
        struct list_head head;
 };
 
@@ -670,9 +671,11 @@ static inline void mptcp_subflow_delegate(struct mptcp_subflow_context *subflow,
                if (WARN_ON_ONCE(!list_empty(&subflow->delegated_node)))
                        return;
 
+               local_lock_nested_bh(&mptcp_delegated_actions.bh_lock);
                delegated = this_cpu_ptr(&mptcp_delegated_actions);
                schedule = list_empty(&delegated->head);
                list_add_tail(&subflow->delegated_node, &delegated->head);
+               local_unlock_nested_bh(&mptcp_delegated_actions.bh_lock);
                sock_hold(mptcp_subflow_tcp_sock(subflow));
                if (schedule)
                        napi_schedule(&delegated->napi);
@@ -684,11 +687,15 @@ mptcp_subflow_delegated_next(struct mptcp_delegated_action *delegated)
 {
        struct mptcp_subflow_context *ret;
 
-       if (list_empty(&delegated->head))
+       local_lock_nested_bh(&mptcp_delegated_actions.bh_lock);
+       if (list_empty(&delegated->head)) {
+               local_unlock_nested_bh(&mptcp_delegated_actions.bh_lock);
                return NULL;
+       }
 
        ret = list_first_entry(&delegated->head, struct mptcp_subflow_context, delegated_node);
        list_del_init(&ret->delegated_node);
+       local_unlock_nested_bh(&mptcp_delegated_actions.bh_lock);
        return ret;
 }
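
Note that the lock is scoped inside mptcp_subflow_delegated_next() rather
than around the caller's whole drain loop, so on PREEMPT_RT the per-CPU lock
is held only for the brief list manipulation, never across subflow
processing. The consumer is the MPTCP NAPI poll handler, which follows
roughly this shape (a simplified sketch, not the verbatim mptcp_napi_poll()
body; example_poll is a hypothetical name and the per-subflow processing is
elided):

	/* Drain the per-CPU delegated list one entry at a time; each
	 * iteration re-takes the nested-BH lock inside the helper.
	 */
	static int example_poll(struct napi_struct *napi, int budget)
	{
		struct mptcp_delegated_action *delegated;
		struct mptcp_subflow_context *subflow;
		int work_done = 0;

		delegated = container_of(napi, struct mptcp_delegated_action, napi);
		while ((subflow = mptcp_subflow_delegated_next(delegated)) != NULL) {
			/* ... run the delegated actions for this subflow and
			 * drop the reference taken by sock_hold() at
			 * delegation time ...
			 */
			if (++work_done == budget)
				return budget;	/* more work pending */
		}
		napi_complete_done(napi, work_done);
		return work_done;
	}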