um: Fix IRQ controller regression on console read
author     Jouni Malinen <j@w1.fi>
           Mon, 6 May 2019 12:39:35 +0000 (14:39 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 27 Jan 2020 13:50:59 +0000 (14:50 +0100)
[ Upstream commit bebe4681d0e7e1be2608282dc86645728bc7f623 ]

The conversion of UML to the epoll based IRQ controller claimed that
close_one_chan() can safely call um_free_irq() and started ignoring the
delay_free_irq parameter, which explicitly noted that the IRQ cannot be
freed when this is being called from chan_interrupt(). This resulted in
free_irq() getting called in interrupt context ("Trying to free IRQ 6
from IRQ context!").

Fix this by restoring the previously used delay_free_irq processing.

Fixes: ff6a17989c08 ("Epoll based IRQ controller")
Signed-off-by: Jouni Malinen <j@w1.fi>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
Signed-off-by: Sasha Levin <sashal@kernel.org>
arch/um/drivers/chan_kern.c
arch/um/kernel/irq.c
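
For context, a minimal, self-contained sketch of the deferred-free pattern this
patch restores: queue the IRQ for freeing when running in interrupt context and
perform the actual free_irq() later from process context. The names my_dev,
my_free_lock, my_free_list, defer_free() and process_deferred_frees() are
hypothetical; the actual patch uses struct chan, irqs_to_free_lock,
irqs_to_free, close_one_chan() and free_irqs() as shown in the diff below.

/*
 * Sketch only, not the UML code: defer free_irq() from IRQ context
 * to process context via a spinlock-protected list.
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

struct my_dev {
        int irq;
        void *cookie;
        struct list_head free_list;
};

static DEFINE_SPINLOCK(my_free_lock);
static LIST_HEAD(my_free_list);

/* Called from IRQ context: free_irq() would warn here, so only queue. */
static void defer_free(struct my_dev *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&my_free_lock, flags);
        list_add(&dev->free_list, &my_free_list);
        spin_unlock_irqrestore(&my_free_lock, flags);
}

/* Called later from process context: safe to call free_irq() now. */
static void process_deferred_frees(void)
{
        struct my_dev *dev, *tmp;
        LIST_HEAD(list);
        unsigned long flags;

        spin_lock_irqsave(&my_free_lock, flags);
        list_splice_init(&my_free_list, &list);
        spin_unlock_irqrestore(&my_free_lock, flags);

        list_for_each_entry_safe(dev, tmp, &list, free_list)
                free_irq(dev->irq, dev->cookie);
}

Splicing the pending entries onto a local list head under the spinlock and only
then calling free_irq() keeps the critical section short and avoids freeing the
IRQ while holding a lock that is also taken from interrupt context.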

diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c
index 05588f9466c7ff1a1b028cb710cab37b65b0d7ba..13ba195f9c9c8b1b3ec9344c60f126c0b374d3f0 100644
--- a/arch/um/drivers/chan_kern.c
+++ b/arch/um/drivers/chan_kern.c
@@ -171,19 +171,55 @@ int enable_chan(struct line *line)
        return err;
 }
 
+/* Items are added in IRQ context, when free_irq can't be called, and
+ * removed in process context, when it can.
+ * This handles interrupt sources which disappear, and which need to
+ * be permanently disabled.  This is discovered in IRQ context, but
+ * the freeing of the IRQ must be done later.
+ */
+static DEFINE_SPINLOCK(irqs_to_free_lock);
+static LIST_HEAD(irqs_to_free);
+
+void free_irqs(void)
+{
+       struct chan *chan;
+       LIST_HEAD(list);
+       struct list_head *ele;
+       unsigned long flags;
+
+       spin_lock_irqsave(&irqs_to_free_lock, flags);
+       list_splice_init(&irqs_to_free, &list);
+       spin_unlock_irqrestore(&irqs_to_free_lock, flags);
+
+       list_for_each(ele, &list) {
+               chan = list_entry(ele, struct chan, free_list);
+
+               if (chan->input && chan->enabled)
+                       um_free_irq(chan->line->driver->read_irq, chan);
+               if (chan->output && chan->enabled)
+                       um_free_irq(chan->line->driver->write_irq, chan);
+               chan->enabled = 0;
+       }
+}
+
 static void close_one_chan(struct chan *chan, int delay_free_irq)
 {
+       unsigned long flags;
+
        if (!chan->opened)
                return;
 
-    /* we can safely call free now - it will be marked
-     *  as free and freed once the IRQ stopped processing
-     */
-       if (chan->input && chan->enabled)
-               um_free_irq(chan->line->driver->read_irq, chan);
-       if (chan->output && chan->enabled)
-               um_free_irq(chan->line->driver->write_irq, chan);
-       chan->enabled = 0;
+       if (delay_free_irq) {
+               spin_lock_irqsave(&irqs_to_free_lock, flags);
+               list_add(&chan->free_list, &irqs_to_free);
+               spin_unlock_irqrestore(&irqs_to_free_lock, flags);
+       } else {
+               if (chan->input && chan->enabled)
+                       um_free_irq(chan->line->driver->read_irq, chan);
+               if (chan->output && chan->enabled)
+                       um_free_irq(chan->line->driver->write_irq, chan);
+               chan->enabled = 0;
+       }
        if (chan->ops->close != NULL)
                (*chan->ops->close)(chan->fd, chan->data);
 
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 6b7f3827d6e4add1993315c220bf96217bfb8986..2753718d31b9d150d48433676af8bc5b5c93105a 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -21,6 +21,8 @@
 #include <irq_user.h>
 
 
+extern void free_irqs(void);
+
 /* When epoll triggers we do not know why it did so
  * we can also have different IRQs for read and write.
  * This is why we keep a small irq_fd array for each fd -
@@ -100,6 +102,8 @@ void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
                        }
                }
        }
+
+       free_irqs();
 }
 
 static int assign_epoll_events_to_irq(struct irq_entry *irq_entry)