net: usb: Convert tasklet API to new bottom half workqueue mechanism
author    Jun Miao <jun.miao@intel.com>
          Wed, 18 Jun 2025 17:39:23 +0000 (13:39 -0400)
committer Jakub Kicinski <kuba@kernel.org>
          Thu, 19 Jun 2025 22:40:02 +0000 (15:40 -0700)
Migrate the usbnet driver from the tasklet API to the new bottom half
(BH) workqueue mechanism, replacing every tasklet call with its
workqueue equivalent. BH workqueue items still execute in softirq
context, so the deferral behaviour is unchanged while the driver moves
off the tasklet interface, which is being phased out.

Signed-off-by: Jun Miao <jun.miao@intel.com>
Link: https://patch.msgid.link/20250618173923.950510-1-jun.miao@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
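
For reference, the conversion pattern applied throughout the driver
looks roughly like this. It is a minimal sketch against a hypothetical
driver struct (my_dev, my_bh_work and friends are made-up names, not
usbnet code); the workqueue calls themselves (INIT_WORK, queue_work on
system_bh_wq, from_work, disable_work_sync) are the real kernel APIs
used by the patch:

	#include <linux/printk.h>
	#include <linux/workqueue.h>

	struct my_dev {
		struct work_struct bh_work;	/* was: struct tasklet_struct bh; */
	};

	/* was: static void my_bh_tasklet(struct tasklet_struct *t) */
	static void my_bh_work(struct work_struct *work)
	{
		/* was: struct my_dev *dev = from_tasklet(dev, t, bh); */
		struct my_dev *dev = from_work(dev, work, bh_work);

		/* bottom-half processing; system_bh_wq runs this in softirq context */
		pr_debug("bh work for %p\n", dev);
	}

	static void my_setup(struct my_dev *dev)
	{
		/* was: tasklet_setup(&dev->bh, my_bh_tasklet); */
		INIT_WORK(&dev->bh_work, my_bh_work);
	}

	static void my_kick(struct my_dev *dev)
	{
		/* was: tasklet_schedule(&dev->bh); */
		queue_work(system_bh_wq, &dev->bh_work);
	}

	static void my_stop(struct my_dev *dev)
	{
		/* was: tasklet_kill(&dev->bh); see semantics note below */
		disable_work_sync(&dev->bh_work);
	}

One semantic difference: tasklet_kill() only waits out a scheduled or
running tasklet, while disable_work_sync() additionally keeps the work
from being queued again until a matching enable_work(), which is
presumably why it was chosen here to break the rescheduling cycle
mentioned in usbnet_stop() below.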
drivers/net/usb/usbnet.c
include/linux/usb/usbnet.h

drivers/net/usb/usbnet.c
index c04e715a4c2ade3bc5587b0df71643a25cf88c55..9564478a79cc378b49db29e8f1d31d3ae792b5bd 100644
@@ -461,7 +461,7 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
 
        __skb_queue_tail(&dev->done, skb);
        if (dev->done.qlen == 1)
-               tasklet_schedule(&dev->bh);
+               queue_work(system_bh_wq, &dev->bh_work);
        spin_unlock(&dev->done.lock);
        spin_unlock_irqrestore(&list->lock, flags);
        return old_state;
@@ -549,7 +549,7 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
                default:
                        netif_dbg(dev, rx_err, dev->net,
                                  "rx submit, %d\n", retval);
-                       tasklet_schedule (&dev->bh);
+                       queue_work(system_bh_wq, &dev->bh_work);
                        break;
                case 0:
                        __usbnet_queue_skb(&dev->rxq, skb, rx_start);
@@ -709,7 +709,7 @@ void usbnet_resume_rx(struct usbnet *dev)
                num++;
        }
 
-       tasklet_schedule(&dev->bh);
+       queue_work(system_bh_wq, &dev->bh_work);
 
        netif_dbg(dev, rx_status, dev->net,
                  "paused rx queue disabled, %d skbs requeued\n", num);
@@ -778,7 +778,7 @@ void usbnet_unlink_rx_urbs(struct usbnet *dev)
 {
        if (netif_running(dev->net)) {
                (void) unlink_urbs (dev, &dev->rxq);
-               tasklet_schedule(&dev->bh);
+               queue_work(system_bh_wq, &dev->bh_work);
        }
 }
 EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
@@ -861,14 +861,14 @@ int usbnet_stop (struct net_device *net)
        /* deferred work (timer, softirq, task) must also stop */
        dev->flags = 0;
        timer_delete_sync(&dev->delay);
-       tasklet_kill(&dev->bh);
+       disable_work_sync(&dev->bh_work);
        cancel_work_sync(&dev->kevent);
 
        /* We have cyclic dependencies. Those calls are needed
         * to break a cycle. We cannot fall into the gaps because
         * we have a flag
         */
-       tasklet_kill(&dev->bh);
+       disable_work_sync(&dev->bh_work);
        timer_delete_sync(&dev->delay);
        cancel_work_sync(&dev->kevent);
 
@@ -955,7 +955,7 @@ int usbnet_open (struct net_device *net)
        clear_bit(EVENT_RX_KILL, &dev->flags);
 
        // delay posting reads until we're fully open
-       tasklet_schedule (&dev->bh);
+       queue_work(system_bh_wq, &dev->bh_work);
        if (info->manage_power) {
                retval = info->manage_power(dev, 1);
                if (retval < 0) {
@@ -1123,7 +1123,7 @@ static void __handle_link_change(struct usbnet *dev)
                 */
        } else {
                /* submitting URBs for reading packets */
-               tasklet_schedule(&dev->bh);
+               queue_work(system_bh_wq, &dev->bh_work);
        }
 
        /* hard_mtu or rx_urb_size may change during link change */
@@ -1198,11 +1198,11 @@ fail_halt:
                } else {
                        clear_bit (EVENT_RX_HALT, &dev->flags);
                        if (!usbnet_going_away(dev))
-                               tasklet_schedule(&dev->bh);
+                               queue_work(system_bh_wq, &dev->bh_work);
                }
        }
 
-       /* tasklet could resubmit itself forever if memory is tight */
+       /* work could resubmit itself forever if memory is tight */
        if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
                struct urb      *urb = NULL;
                int resched = 1;
@@ -1224,7 +1224,7 @@ fail_halt:
 fail_lowmem:
                        if (resched)
                                if (!usbnet_going_away(dev))
-                                       tasklet_schedule(&dev->bh);
+                                       queue_work(system_bh_wq, &dev->bh_work);
                }
        }
 
@@ -1325,7 +1325,7 @@ void usbnet_tx_timeout (struct net_device *net, unsigned int txqueue)
        struct usbnet           *dev = netdev_priv(net);
 
        unlink_urbs (dev, &dev->txq);
-       tasklet_schedule (&dev->bh);
+       queue_work(system_bh_wq, &dev->bh_work);
        /* this needs to be handled individually because the generic layer
         * doesn't know what is sufficient and could not restore private
         * information if a remedy of an unconditional reset were used.
@@ -1547,7 +1547,7 @@ static inline void usb_free_skb(struct sk_buff *skb)
 
 /*-------------------------------------------------------------------------*/
 
-// tasklet (work deferred from completions, in_irq) or timer
+// work (work deferred from completions, in_irq) or timer
 
 static void usbnet_bh (struct timer_list *t)
 {
@@ -1601,16 +1601,16 @@ static void usbnet_bh (struct timer_list *t)
                                          "rxqlen %d --> %d\n",
                                          temp, dev->rxq.qlen);
                        if (dev->rxq.qlen < RX_QLEN(dev))
-                               tasklet_schedule (&dev->bh);
+                               queue_work(system_bh_wq, &dev->bh_work);
                }
                if (dev->txq.qlen < TX_QLEN (dev))
                        netif_wake_queue (dev->net);
        }
 }
 
-static void usbnet_bh_tasklet(struct tasklet_struct *t)
+static void usbnet_bh_work(struct work_struct *work)
 {
-       struct usbnet *dev = from_tasklet(dev, t, bh);
+       struct usbnet *dev = from_work(dev, work, bh_work);
 
        usbnet_bh(&dev->delay);
 }
@@ -1742,7 +1742,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
        skb_queue_head_init (&dev->txq);
        skb_queue_head_init (&dev->done);
        skb_queue_head_init(&dev->rxq_pause);
-       tasklet_setup(&dev->bh, usbnet_bh_tasklet);
+       INIT_WORK(&dev->bh_work, usbnet_bh_work);
        INIT_WORK (&dev->kevent, usbnet_deferred_kevent);
        init_usb_anchor(&dev->deferred);
        timer_setup(&dev->delay, usbnet_bh, 0);
@@ -1971,7 +1971,7 @@ int usbnet_resume (struct usb_interface *intf)
 
                        if (!(dev->txq.qlen >= TX_QLEN(dev)))
                                netif_tx_wake_all_queues(dev->net);
-                       tasklet_schedule (&dev->bh);
+                       queue_work(system_bh_wq, &dev->bh_work);
                }
        }
 
include/linux/usb/usbnet.h
index 0b9f1e598e3a6bb0285e35918a361d74cd1dd71f..208682f77179313b77c507f23b110821971c8293 100644
@@ -58,7 +58,7 @@ struct usbnet {
        unsigned                interrupt_count;
        struct mutex            interrupt_mutex;
        struct usb_anchor       deferred;
-       struct tasklet_struct   bh;
+       struct work_struct      bh_work;
 
        struct work_struct      kevent;
        unsigned long           flags;
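
A closing note on the stop path: because disable_work_sync() leaves the
work item disabled, a later queue_work() is a no-op until the disable
count is dropped again with enable_work(). A reopen path converted this
way would therefore need something along these lines (a sketch against
the hypothetical struct above, not code from this patch):

	enable_work(&dev->bh_work);		/* undo disable_work_sync() */
	queue_work(system_bh_wq, &dev->bh_work);

enable_work() is the documented counterpart of disable_work() and
disable_work_sync() in include/linux/workqueue.h.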