git.ipfire.org Git - thirdparty/linux.git/commitdiff
net: hsr: serialize seq_blocks merge across nodes
author: Luka Gejak <luka.gejak@linux.dev>
Wed, 1 Apr 2026 09:22:42 +0000 (11:22 +0200)
committer: Jakub Kicinski <kuba@kernel.org>
Thu, 2 Apr 2026 15:23:49 +0000 (08:23 -0700)
During node merging, hsr_handle_sup_frame() walks node_curr->seq_blocks
to update node_real without holding node_curr->seq_out_lock. This
allows concurrent mutations from duplicate registration paths, risking
inconsistent state or XArray/bitmap corruption.

Fix this by locking both nodes' seq_out_lock during the merge.
To prevent ABBA deadlocks, locks are acquired in order of memory
address.

Reviewed-by: Felix Maurer <fmaurer@redhat.com>
Fixes: 415e6367512b ("hsr: Implement more robust duplicate discard for PRP")
Signed-off-by: Luka Gejak <luka.gejak@linux.dev>
Link: https://patch.msgid.link/20260401092243.52121-2-luka.gejak@linux.dev
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
net/hsr/hsr_framereg.c

index 50996f4de7f9e725ff11a1d4d74839c199725c16..d418635936743ac0e2e508ac340e9cdca6857202 100644 (file)
@@ -123,6 +123,40 @@ static void hsr_free_node_rcu(struct rcu_head *rn)
        hsr_free_node(node);
 }
 
+/* Acquire the seq_out_lock of both nodes involved in a merge.
+ *
+ * Locks are always taken in ascending order of node memory address so
+ * that concurrent callers locking the same pair cannot ABBA-deadlock.
+ * The first lock is taken with spin_lock_bh(); the second uses plain
+ * spin_lock_nested() since BH is already disabled at that point, and
+ * SINGLE_DEPTH_NESTING tells lockdep that holding two locks of the
+ * same class here is intentional.
+ */
+static void hsr_lock_seq_out_pair(struct hsr_node *node_a,
+                                 struct hsr_node *node_b)
+{
+       /* Same node: only one lock exists, take it once. */
+       if (node_a == node_b) {
+               spin_lock_bh(&node_a->seq_out_lock);
+               return;
+       }
+
+       if (node_a < node_b) {
+               spin_lock_bh(&node_a->seq_out_lock);
+               spin_lock_nested(&node_b->seq_out_lock, SINGLE_DEPTH_NESTING);
+       } else {
+               spin_lock_bh(&node_b->seq_out_lock);
+               spin_lock_nested(&node_a->seq_out_lock, SINGLE_DEPTH_NESTING);
+       }
+}
+
+/* Release the pair of locks taken by hsr_lock_seq_out_pair().
+ *
+ * The inner (higher-address) lock is dropped with plain spin_unlock()
+ * and the outer one with spin_unlock_bh(), mirroring the acquisition:
+ * BH stays disabled until the lock that disabled it is released.
+ */
+static void hsr_unlock_seq_out_pair(struct hsr_node *node_a,
+                                   struct hsr_node *node_b)
+{
+       /* Same node: only one lock was taken. */
+       if (node_a == node_b) {
+               spin_unlock_bh(&node_a->seq_out_lock);
+               return;
+       }
+
+       if (node_a < node_b) {
+               spin_unlock(&node_b->seq_out_lock);
+               spin_unlock_bh(&node_a->seq_out_lock);
+       } else {
+               spin_unlock(&node_a->seq_out_lock);
+               spin_unlock_bh(&node_b->seq_out_lock);
+       }
+}
+
 void hsr_del_nodes(struct list_head *node_db)
 {
        struct hsr_node *node;
@@ -432,7 +466,7 @@ void hsr_handle_sup_frame(struct hsr_frame_info *frame)
        }
 
        ether_addr_copy(node_real->macaddress_B, ethhdr->h_source);
-       spin_lock_bh(&node_real->seq_out_lock);
+       hsr_lock_seq_out_pair(node_real, node_curr);
        for (i = 0; i < HSR_PT_PORTS; i++) {
                if (!node_curr->time_in_stale[i] &&
                    time_after(node_curr->time_in[i], node_real->time_in[i])) {
@@ -455,7 +489,7 @@ void hsr_handle_sup_frame(struct hsr_frame_info *frame)
                                  src_blk->seq_nrs[i], HSR_SEQ_BLOCK_SIZE);
                }
        }
-       spin_unlock_bh(&node_real->seq_out_lock);
+       hsr_unlock_seq_out_pair(node_real, node_curr);
        node_real->addr_B_port = port_rcv->type;
 
        spin_lock_bh(&hsr->list_lock);