gpio: mpsse: ensure worker is torn down
author    Mary Strodl <mstrodl@csh.rit.edu>
          Tue, 14 Oct 2025 13:35:28 +0000 (09:35 -0400)
committer Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
          Tue, 21 Oct 2025 12:10:45 +0000 (14:10 +0200)
When an IRQ worker is running, unplugging the device causes a crash.
The Sealevel hardware this driver was written for is not hotpluggable,
so I never noticed this.

This change uses a spinlock to protect a list of workers; the workers
are torn down on disconnect.
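
In outline (identifiers abbreviated; the diff below is the authoritative
code), teardown collects the workers to cancel while holding the spinlock
and defers the blocking cancel_work_sync() until the lock is dropped:

        LIST_HEAD(destructors);

        scoped_guard(raw_spinlock_irqsave, &priv->irq_spin) {
                list_for_each_entry_safe(worker, tmp, &priv->workers, list) {
                        if (worker == me)
                                continue; /* don't stop ourselves */
                        list_del(&worker->list);
                        atomic_set(&worker->cancelled, 1);
                        list_add(&worker->destroy, &destructors);
                }
        }
        /* cancel_work_sync() can sleep, so call it outside the spinlock */
        list_for_each_entry_safe(worker, tmp, &destructors, destroy) {
                cancel_work_sync(&worker->work);
                kfree(worker);
        }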

Signed-off-by: Mary Strodl <mstrodl@csh.rit.edu>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Link: https://lore.kernel.org/r/20251014133530.3592716-3-mstrodl@csh.rit.edu
Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
drivers/gpio/gpio-mpsse.c

index c508d9e330542bf76cdcb36a18caf6a0cdeac0b2..6ec940f6b3717d6b7d45d679d4a63b7de85e97b0 100644
@@ -10,6 +10,7 @@
 #include <linux/cleanup.h>
 #include <linux/gpio/driver.h>
 #include <linux/mutex.h>
+#include <linux/spinlock.h>
 #include <linux/usb.h>
 
 struct mpsse_priv {
@@ -17,8 +18,10 @@ struct mpsse_priv {
        struct usb_device *udev;     /* USB device encompassing all MPSSEs */
        struct usb_interface *intf;  /* USB interface for this MPSSE */
        u8 intf_id;                  /* USB interface number for this MPSSE */
-       struct work_struct irq_work; /* polling work thread */
+       struct list_head workers;    /* polling work threads */
        struct mutex irq_mutex;      /* lock over irq_data */
+       struct mutex irq_race;       /* serializes polling worker teardown */
+       raw_spinlock_t irq_spin;     /* protects worker list */
        atomic_t irq_type[16];       /* pin -> edge detection type */
        atomic_t irq_enabled;
        int id;
@@ -34,6 +37,14 @@ struct mpsse_priv {
        struct mutex io_mutex;      /* sync I/O with disconnect */
 };
 
+struct mpsse_worker {
+       struct mpsse_priv  *priv;
+       struct work_struct  work;
+       atomic_t            cancelled; /* worker should stop polling */
+       struct list_head    list;      /* entry in priv->workers */
+       struct list_head    destroy;   /* entry in a local teardown list */
+};
+
 struct bulk_desc {
        bool tx;                    /* direction of bulk transfer */
        u8 *data;                   /* input (tx) or output (rx) */
@@ -283,18 +294,62 @@ static int gpio_mpsse_get_direction(struct gpio_chip *chip,
        return ret;
 }
 
-static void gpio_mpsse_poll(struct work_struct *work)
+/*
+ * Stop all workers except @my_worker.
+ * Must be called with priv->irq_race held.
+ */
+static void gpio_mpsse_stop_all_except(struct mpsse_priv *priv,
+                                      struct mpsse_worker *my_worker)
+{
+       struct mpsse_worker *worker, *worker_tmp;
+       LIST_HEAD(destructors);
+
+       scoped_guard(raw_spinlock_irqsave, &priv->irq_spin) {
+               list_for_each_entry_safe(worker, worker_tmp,
+                                        &priv->workers, list) {
+                       /* Don't stop ourselves */
+                       if (worker == my_worker)
+                               continue;
+
+                       list_del(&worker->list);
+
+                       /* Ask the worker to terminate itself */
+                       atomic_set(&worker->cancelled, 1);
+                       /* Queue the worker for cancellation outside the lock */
+                       INIT_LIST_HEAD(&worker->destroy);
+                       list_add(&worker->destroy, &destructors);
+               }
+       }
+
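+       /* cancel_work_sync() can sleep, so it must run outside the spinlock */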
+       list_for_each_entry_safe(worker, worker_tmp,
+                                &destructors, destroy) {
+               list_del(&worker->destroy);
+               cancel_work_sync(&worker->work);
+               kfree(worker);
+       }
+}
+
+static void gpio_mpsse_poll(struct work_struct *my_work)
 {
        unsigned long pin_mask, pin_states, flags;
        int irq_enabled, offset, err, value, fire_irq,
                irq, old_value[16], irq_type[16];
-       struct mpsse_priv *priv = container_of(work, struct mpsse_priv,
-                                              irq_work);
+       struct mpsse_worker *my_worker = container_of(my_work, struct mpsse_worker, work);
+       struct mpsse_priv *priv = my_worker->priv;
 
        for (offset = 0; offset < priv->gpio.ngpio; ++offset)
                old_value[offset] = -1;
 
-       while ((irq_enabled = atomic_read(&priv->irq_enabled))) {
+       /*
+        * We only want one worker. Workers race to acquire irq_race and tear
+        * down all other workers. This is a cond guard so that we don't deadlock
+        * trying to cancel a worker.
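+        * If the trylock fails, teardown is already in progress elsewhere;
+        * this worker simply exits and is freed by the next teardown pass.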
+        */
+       scoped_cond_guard(mutex_try, return, &priv->irq_race)
+               gpio_mpsse_stop_all_except(priv, my_worker);
+
+       while ((irq_enabled = atomic_read(&priv->irq_enabled)) &&
+              !atomic_read(&my_worker->cancelled)) {
                usleep_range(MPSSE_POLL_INTERVAL, MPSSE_POLL_INTERVAL + 1000);
                /* Cleanup will trigger at the end of the loop */
                guard(mutex)(&priv->irq_mutex);
@@ -369,21 +424,45 @@ static int gpio_mpsse_set_irq_type(struct irq_data *irqd, unsigned int type)
 
 static void gpio_mpsse_irq_disable(struct irq_data *irqd)
 {
+       struct mpsse_worker *worker;
        struct mpsse_priv *priv = irq_data_get_irq_chip_data(irqd);
 
        atomic_and(~BIT(irqd->hwirq), &priv->irq_enabled);
        gpiochip_disable_irq(&priv->gpio, irqd->hwirq);
+
+       /*
+        * Teardown can't happen in IRQ context (it blocks). As a result,
+        * these workers stick around until the IRQ is re-enabled or the
+        * device is disconnected.
+        */
+       scoped_guard(raw_spinlock_irqsave, &priv->irq_spin)
+               list_for_each_entry(worker, &priv->workers, list)
+                       atomic_set(&worker->cancelled, 1);
 }
 
 static void gpio_mpsse_irq_enable(struct irq_data *irqd)
 {
+       struct mpsse_worker *worker;
        struct mpsse_priv *priv = irq_data_get_irq_chip_data(irqd);
 
        gpiochip_enable_irq(&priv->gpio, irqd->hwirq);
        /* If no-one else was using the IRQ, enable it */
        if (!atomic_fetch_or(BIT(irqd->hwirq), &priv->irq_enabled)) {
-               INIT_WORK(&priv->irq_work, gpio_mpsse_poll);
-               schedule_work(&priv->irq_work);
+               /*
+                * Can't be devm-managed: devres takes a non-raw spinlock,
+                * which is illegal in this context, where our caller holds
+                * a raw spinlock.
+                */
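+               /* GFP_NOWAIT: we must not sleep with a raw spinlock held */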
+               worker = kzalloc(sizeof(*worker), GFP_NOWAIT);
+               if (!worker)
+                       return;
+
+               worker->priv = priv;
+               INIT_LIST_HEAD(&worker->list);
+               INIT_WORK(&worker->work, gpio_mpsse_poll);
+               schedule_work(&worker->work);
+
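+               /* Publish the worker so a later teardown can find and cancel it */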
+               scoped_guard(raw_spinlock_irqsave, &priv->irq_spin)
+                       list_add(&worker->list, &priv->workers);
        }
 }
 
@@ -435,6 +514,12 @@ static int gpio_mpsse_probe(struct usb_interface *interface,
        if (err)
                return err;
 
+       err = devm_mutex_init(dev, &priv->irq_race);
+       if (err)
+               return err;
+
+       raw_spin_lock_init(&priv->irq_spin);
+       INIT_LIST_HEAD(&priv->workers);
+
        priv->gpio.label = devm_kasprintf(dev, GFP_KERNEL,
                                          "gpio-mpsse.%d.%d",
                                          priv->id, priv->intf_id);
@@ -505,6 +590,13 @@ static void gpio_mpsse_disconnect(struct usb_interface *intf)
 {
        struct mpsse_priv *priv = usb_get_intfdata(intf);
 
+       /*
+        * The lock prevents a double-free of a worker racing between this
+        * teardown and the one at the start of gpio_mpsse_poll.
+        */
+       scoped_guard(mutex, &priv->irq_race)
+               gpio_mpsse_stop_all_except(priv, NULL);
+
        priv->intf = NULL;
        usb_set_intfdata(intf, NULL);
        usb_put_dev(priv->udev);