git.ipfire.org Git - thirdparty/linux.git/commitdiff
net: flush_backlog() small changes
authorEric Dumazet <edumazet@google.com>
Tue, 4 Feb 2025 14:48:25 +0000 (14:48 +0000)
committerJakub Kicinski <kuba@kernel.org>
Thu, 6 Feb 2025 02:19:54 +0000 (18:19 -0800)
Add READ_ONCE() around reads of skb->dev->reg_state, because
this field can be changed from other threads/cpus.

Instead of calling dev_kfree_skb_irq() and kfree_skb()
while interrupts are masked and locks held,
use a temporary list and __skb_queue_purge_reason().

Use SKB_DROP_REASON_DEV_READY drop reason to better
describe why these skbs are dropped.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Jason Xing <kerneljasonxing@gmail.com>
Link: https://patch.msgid.link/20250204144825.316785-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
net/core/dev.c

index 2b141f20b13b5714de7e32aa5ad54ce3e72188b9..c41d1e1cbf62e0c5778c472cdb947b6f140f6064 100644 (file)
@@ -6119,16 +6119,18 @@ EXPORT_SYMBOL(netif_receive_skb_list);
 static void flush_backlog(struct work_struct *work)
 {
        struct sk_buff *skb, *tmp;
+       struct sk_buff_head list;
        struct softnet_data *sd;
 
+       __skb_queue_head_init(&list);
        local_bh_disable();
        sd = this_cpu_ptr(&softnet_data);
 
        backlog_lock_irq_disable(sd);
        skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
-               if (skb->dev->reg_state == NETREG_UNREGISTERING) {
+               if (READ_ONCE(skb->dev->reg_state) == NETREG_UNREGISTERING) {
                        __skb_unlink(skb, &sd->input_pkt_queue);
-                       dev_kfree_skb_irq(skb);
+                       __skb_queue_tail(&list, skb);
                        rps_input_queue_head_incr(sd);
                }
        }
@@ -6136,14 +6138,16 @@ static void flush_backlog(struct work_struct *work)
 
        local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
        skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
-               if (skb->dev->reg_state == NETREG_UNREGISTERING) {
+               if (READ_ONCE(skb->dev->reg_state) == NETREG_UNREGISTERING) {
                        __skb_unlink(skb, &sd->process_queue);
-                       kfree_skb(skb);
+                       __skb_queue_tail(&list, skb);
                        rps_input_queue_head_incr(sd);
                }
        }
        local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
        local_bh_enable();
+
+       __skb_queue_purge_reason(&list, SKB_DROP_REASON_DEV_READY);
 }
 
 static bool flush_required(int cpu)