git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.14-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 18 Feb 2020 04:51:20 +0000 (05:51 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 18 Feb 2020 04:51:20 +0000 (05:51 +0100)
added patches:
padata-remove-broken-queue-flushing.patch
serial-imx-ensure-that-rx-irqs-are-off-if-rx-is-off.patch
serial-imx-only-handle-irqs-that-are-actually-enabled.patch

queue-4.14/padata-remove-broken-queue-flushing.patch [new file with mode: 0644]
queue-4.14/serial-imx-ensure-that-rx-irqs-are-off-if-rx-is-off.patch [new file with mode: 0644]
queue-4.14/serial-imx-only-handle-irqs-that-are-actually-enabled.patch [new file with mode: 0644]
queue-4.14/series

diff --git a/queue-4.14/padata-remove-broken-queue-flushing.patch b/queue-4.14/padata-remove-broken-queue-flushing.patch
new file mode 100644 (file)
index 0000000..86c20d3
--- /dev/null
+++ b/queue-4.14/padata-remove-broken-queue-flushing.patch
@@ -0,0 +1,141 @@
+From 07928d9bfc81640bab36f5190e8725894d93b659 Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Tue, 19 Nov 2019 13:17:31 +0800
+Subject: padata: Remove broken queue flushing
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit 07928d9bfc81640bab36f5190e8725894d93b659 upstream.
+
+The function padata_flush_queues is fundamentally broken because
+it cannot force padata users to complete the request that is
+underway.  IOW padata has to passively wait for the completion
+of any outstanding work.
+
+As it stands flushing is used in two places.  Its use in padata_stop
+is simply unnecessary because nothing depends on the queues to
+be flushed afterwards.
+
+The other use in padata_replace is more substantial as we depend
+on it to free the old pd structure.  This patch instead uses the
+pd->refcnt to dynamically free the pd structure once all requests
+are complete.
+
+Fixes: 2b73b07ab8a4 ("padata: Flush the padata queues actively")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+[dj: leave "pd->pinst = pinst" assignment in padata_alloc_pd()]
+Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/padata.c |   45 ++++++++++++---------------------------------
+ 1 file changed, 12 insertions(+), 33 deletions(-)
+
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -34,6 +34,8 @@
+ #define MAX_OBJ_NUM 1000
++static void padata_free_pd(struct parallel_data *pd);
++
+ static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
+ {
+       int cpu, target_cpu;
+@@ -292,6 +294,7 @@ static void padata_serial_worker(struct
+       struct padata_serial_queue *squeue;
+       struct parallel_data *pd;
+       LIST_HEAD(local_list);
++      int cnt;
+       local_bh_disable();
+       squeue = container_of(serial_work, struct padata_serial_queue, work);
+@@ -301,6 +304,8 @@ static void padata_serial_worker(struct
+       list_replace_init(&squeue->serial.list, &local_list);
+       spin_unlock(&squeue->serial.lock);
++      cnt = 0;
++
+       while (!list_empty(&local_list)) {
+               struct padata_priv *padata;
+@@ -310,9 +315,12 @@ static void padata_serial_worker(struct
+               list_del_init(&padata->list);
+               padata->serial(padata);
+-              atomic_dec(&pd->refcnt);
++              cnt++;
+       }
+       local_bh_enable();
++
++      if (atomic_sub_and_test(cnt, &pd->refcnt))
++              padata_free_pd(pd);
+ }
+ /**
+@@ -435,7 +443,7 @@ static struct parallel_data *padata_allo
+       setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
+       atomic_set(&pd->seq_nr, -1);
+       atomic_set(&pd->reorder_objects, 0);
+-      atomic_set(&pd->refcnt, 0);
++      atomic_set(&pd->refcnt, 1);
+       pd->pinst = pinst;
+       spin_lock_init(&pd->lock);
+@@ -460,31 +468,6 @@ static void padata_free_pd(struct parall
+       kfree(pd);
+ }
+-/* Flush all objects out of the padata queues. */
+-static void padata_flush_queues(struct parallel_data *pd)
+-{
+-      int cpu;
+-      struct padata_parallel_queue *pqueue;
+-      struct padata_serial_queue *squeue;
+-
+-      for_each_cpu(cpu, pd->cpumask.pcpu) {
+-              pqueue = per_cpu_ptr(pd->pqueue, cpu);
+-              flush_work(&pqueue->work);
+-      }
+-
+-      del_timer_sync(&pd->timer);
+-
+-      if (atomic_read(&pd->reorder_objects))
+-              padata_reorder(pd);
+-
+-      for_each_cpu(cpu, pd->cpumask.cbcpu) {
+-              squeue = per_cpu_ptr(pd->squeue, cpu);
+-              flush_work(&squeue->work);
+-      }
+-
+-      BUG_ON(atomic_read(&pd->refcnt) != 0);
+-}
+-
+ static void __padata_start(struct padata_instance *pinst)
+ {
+       pinst->flags |= PADATA_INIT;
+@@ -498,10 +481,6 @@ static void __padata_stop(struct padata_
+       pinst->flags &= ~PADATA_INIT;
+       synchronize_rcu();
+-
+-      get_online_cpus();
+-      padata_flush_queues(pinst->pd);
+-      put_online_cpus();
+ }
+ /* Replace the internal control structure with a new one. */
+@@ -522,8 +501,8 @@ static void padata_replace(struct padata
+       if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
+               notification_mask |= PADATA_CPU_SERIAL;
+-      padata_flush_queues(pd_old);
+-      padata_free_pd(pd_old);
++      if (atomic_dec_and_test(&pd_old->refcnt))
++              padata_free_pd(pd_old);
+       if (notification_mask)
+               blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
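
For readers unfamiliar with the lifetime scheme the padata patch above switches to, here is a standalone C sketch of the same pattern (illustrative only, not kernel code and not part of the patch; the pd_get/pd_put_many helpers are invented for the example): the structure starts with a reference count of one held by its owner, each submitted request takes a reference, the serial worker drops one reference per completed request in a single subtraction, and whichever side brings the count to zero frees the structure.

/* Illustrative only: reference-counted teardown in the style the padata
 * patch adopts. Plain C11 atomics stand in for the kernel's atomic_t
 * helpers (atomic_sub_and_test / atomic_dec_and_test). */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct pd {
        atomic_int refcnt;      /* starts at 1: the owner's reference */
        /* ... per-CPU queues would live here ... */
};

static struct pd *pd_alloc(void)
{
        struct pd *pd = malloc(sizeof(*pd));
        atomic_init(&pd->refcnt, 1);
        return pd;
}

static void pd_free(struct pd *pd)
{
        printf("freeing pd %p\n", (void *)pd);
        free(pd);
}

/* Take one reference per submitted request. */
static void pd_get(struct pd *pd)
{
        atomic_fetch_add(&pd->refcnt, 1);
}

/* Drop 'cnt' references at once, as the serial worker does after it has
 * completed a whole batch; free on the transition to zero. */
static void pd_put_many(struct pd *pd, int cnt)
{
        if (atomic_fetch_sub(&pd->refcnt, cnt) == cnt)
                pd_free(pd);
}

int main(void)
{
        struct pd *pd = pd_alloc();

        pd_get(pd);             /* two requests still in flight */
        pd_get(pd);

        pd_put_many(pd, 1);     /* like padata_replace dropping the initial ref */
        pd_put_many(pd, 2);     /* serial worker finishes the batch: freed here */
        return 0;
}

In the patch itself the same transition is done with atomic_sub_and_test() in padata_serial_worker() and atomic_dec_and_test() in padata_replace().
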
diff --git a/queue-4.14/serial-imx-ensure-that-rx-irqs-are-off-if-rx-is-off.patch b/queue-4.14/serial-imx-ensure-that-rx-irqs-are-off-if-rx-is-off.patch
new file mode 100644 (file)
index 0000000..f8a86e4
--- /dev/null
+++ b/queue-4.14/serial-imx-ensure-that-rx-irqs-are-off-if-rx-is-off.patch
@@ -0,0 +1,250 @@
+From 76821e222c189b81d553b855ee7054340607eb46 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= <u.kleine-koenig@pengutronix.de>
+Date: Fri, 2 Mar 2018 11:07:26 +0100
+Subject: serial: imx: ensure that RX irqs are off if RX is off
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+commit 76821e222c189b81d553b855ee7054340607eb46 upstream.
+
+Make sure that UCR1.RXDMAEN and UCR1.ATDMAEN (for the DMA case) and
+UCR1.RRDYEN (for the PIO case) are off iff UCR2.RXEN is disabled. This
+ensures that the fifo isn't read with RX disabled, which results in an
+exception.
+
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+[Backport to v4.14]
+Signed-off-by: Frieder Schrempf <frieder.schrempf@kontron.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/imx.c |  116 +++++++++++++++++++++++++++++++----------------
+ 1 file changed, 78 insertions(+), 38 deletions(-)
+
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -80,7 +80,7 @@
+ #define UCR1_IDEN     (1<<12) /* Idle condition interrupt */
+ #define UCR1_ICD_REG(x) (((x) & 3) << 10) /* idle condition detect */
+ #define UCR1_RRDYEN   (1<<9)  /* Recv ready interrupt enable */
+-#define UCR1_RDMAEN   (1<<8)  /* Recv ready DMA enable */
++#define UCR1_RXDMAEN  (1<<8)  /* Recv ready DMA enable */
+ #define UCR1_IREN     (1<<7)  /* Infrared interface enable */
+ #define UCR1_TXMPTYEN (1<<6)  /* Transimitter empty interrupt enable */
+ #define UCR1_RTSDEN   (1<<5)  /* RTS delta interrupt enable */
+@@ -355,6 +355,30 @@ static void imx_port_rts_auto(struct imx
+ /*
+  * interrupts disabled on entry
+  */
++static void imx_start_rx(struct uart_port *port)
++{
++      struct imx_port *sport = (struct imx_port *)port;
++      unsigned int ucr1, ucr2;
++
++      ucr1 = readl(port->membase + UCR1);
++      ucr2 = readl(port->membase + UCR2);
++
++      ucr2 |= UCR2_RXEN;
++
++      if (sport->dma_is_enabled) {
++              ucr1 |= UCR1_RXDMAEN | UCR1_ATDMAEN;
++      } else {
++              ucr1 |= UCR1_RRDYEN;
++      }
++
++      /* Write UCR2 first as it includes RXEN */
++      writel(ucr2, port->membase + UCR2);
++      writel(ucr1, port->membase + UCR1);
++}
++
++/*
++ * interrupts disabled on entry
++ */
+ static void imx_stop_tx(struct uart_port *port)
+ {
+       struct imx_port *sport = (struct imx_port *)port;
+@@ -378,9 +402,10 @@ static void imx_stop_tx(struct uart_port
+                       imx_port_rts_active(sport, &temp);
+               else
+                       imx_port_rts_inactive(sport, &temp);
+-              temp |= UCR2_RXEN;
+               writel(temp, port->membase + UCR2);
++              imx_start_rx(port);
++
+               temp = readl(port->membase + UCR4);
+               temp &= ~UCR4_TCEN;
+               writel(temp, port->membase + UCR4);
+@@ -393,7 +418,7 @@ static void imx_stop_tx(struct uart_port
+ static void imx_stop_rx(struct uart_port *port)
+ {
+       struct imx_port *sport = (struct imx_port *)port;
+-      unsigned long temp;
++      unsigned long ucr1, ucr2;
+       if (sport->dma_is_enabled && sport->dma_is_rxing) {
+               if (sport->port.suspended) {
+@@ -404,12 +429,18 @@ static void imx_stop_rx(struct uart_port
+               }
+       }
+-      temp = readl(sport->port.membase + UCR2);
+-      writel(temp & ~UCR2_RXEN, sport->port.membase + UCR2);
++      ucr1 = readl(sport->port.membase + UCR1);
++      ucr2 = readl(sport->port.membase + UCR2);
+-      /* disable the `Receiver Ready Interrrupt` */
+-      temp = readl(sport->port.membase + UCR1);
+-      writel(temp & ~UCR1_RRDYEN, sport->port.membase + UCR1);
++      if (sport->dma_is_enabled) {
++              ucr1 &= ~(UCR1_RXDMAEN | UCR1_ATDMAEN);
++      } else {
++              ucr1 &= ~UCR1_RRDYEN;
++      }
++      writel(ucr1, port->membase + UCR1);
++
++      ucr2 &= ~UCR2_RXEN;
++      writel(ucr2, port->membase + UCR2);
+ }
+ /*
+@@ -581,10 +612,11 @@ static void imx_start_tx(struct uart_por
+                       imx_port_rts_active(sport, &temp);
+               else
+                       imx_port_rts_inactive(sport, &temp);
+-              if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
+-                      temp &= ~UCR2_RXEN;
+               writel(temp, port->membase + UCR2);
++              if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
++                      imx_stop_rx(port);
++
+               /* enable transmitter and shifter empty irq */
+               temp = readl(port->membase + UCR4);
+               temp |= UCR4_TCEN;
+@@ -1206,7 +1238,7 @@ static void imx_enable_dma(struct imx_po
+       /* set UCR1 */
+       temp = readl(sport->port.membase + UCR1);
+-      temp |= UCR1_RDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN;
++      temp |= UCR1_RXDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN;
+       writel(temp, sport->port.membase + UCR1);
+       temp = readl(sport->port.membase + UCR2);
+@@ -1224,7 +1256,7 @@ static void imx_disable_dma(struct imx_p
+       /* clear UCR1 */
+       temp = readl(sport->port.membase + UCR1);
+-      temp &= ~(UCR1_RDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN);
++      temp &= ~(UCR1_RXDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN);
+       writel(temp, sport->port.membase + UCR1);
+       /* clear UCR2 */
+@@ -1289,11 +1321,9 @@ static int imx_startup(struct uart_port
+       writel(USR1_RTSD | USR1_DTRD, sport->port.membase + USR1);
+       writel(USR2_ORE, sport->port.membase + USR2);
+-      if (sport->dma_is_inited && !sport->dma_is_enabled)
+-              imx_enable_dma(sport);
+-
+       temp = readl(sport->port.membase + UCR1);
+-      temp |= UCR1_RRDYEN | UCR1_UARTEN;
++      temp &= ~UCR1_RRDYEN;
++      temp |= UCR1_UARTEN;
+       if (sport->have_rtscts)
+                       temp |= UCR1_RTSDEN;
+@@ -1332,14 +1362,13 @@ static int imx_startup(struct uart_port
+        */
+       imx_enable_ms(&sport->port);
+-      /*
+-       * Start RX DMA immediately instead of waiting for RX FIFO interrupts.
+-       * In our iMX53 the average delay for the first reception dropped from
+-       * approximately 35000 microseconds to 1000 microseconds.
+-       */
+-      if (sport->dma_is_enabled) {
+-              imx_disable_rx_int(sport);
++      if (sport->dma_is_inited) {
++              imx_enable_dma(sport);
+               start_rx_dma(sport);
++      } else {
++              temp = readl(sport->port.membase + UCR1);
++              temp |= UCR1_RRDYEN;
++              writel(temp, sport->port.membase + UCR1);
+       }
+       spin_unlock_irqrestore(&sport->port.lock, flags);
+@@ -1386,7 +1415,8 @@ static void imx_shutdown(struct uart_por
+       spin_lock_irqsave(&sport->port.lock, flags);
+       temp = readl(sport->port.membase + UCR1);
+-      temp &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN);
++      temp &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN |
++                UCR1_RXDMAEN | UCR1_ATDMAEN);
+       writel(temp, sport->port.membase + UCR1);
+       spin_unlock_irqrestore(&sport->port.lock, flags);
+@@ -1659,7 +1689,7 @@ static int imx_poll_init(struct uart_por
+ {
+       struct imx_port *sport = (struct imx_port *)port;
+       unsigned long flags;
+-      unsigned long temp;
++      unsigned long ucr1, ucr2;
+       int retval;
+       retval = clk_prepare_enable(sport->clk_ipg);
+@@ -1673,16 +1703,29 @@ static int imx_poll_init(struct uart_por
+       spin_lock_irqsave(&sport->port.lock, flags);
+-      temp = readl(sport->port.membase + UCR1);
++      /*
++       * Be careful about the order of enabling bits here. First enable the
++       * receiver (UARTEN + RXEN) and only then the corresponding irqs.
++       * This prevents that a character that already sits in the RX fifo is
++       * triggering an irq but the try to fetch it from there results in an
++       * exception because UARTEN or RXEN is still off.
++       */
++      ucr1 = readl(port->membase + UCR1);
++      ucr2 = readl(port->membase + UCR2);
++
+       if (is_imx1_uart(sport))
+-              temp |= IMX1_UCR1_UARTCLKEN;
+-      temp |= UCR1_UARTEN | UCR1_RRDYEN;
+-      temp &= ~(UCR1_TXMPTYEN | UCR1_RTSDEN);
+-      writel(temp, sport->port.membase + UCR1);
++              ucr1 |= IMX1_UCR1_UARTCLKEN;
+-      temp = readl(sport->port.membase + UCR2);
+-      temp |= UCR2_RXEN;
+-      writel(temp, sport->port.membase + UCR2);
++      ucr1 |= UCR1_UARTEN;
++      ucr1 &= ~(UCR1_TXMPTYEN | UCR1_RTSDEN | UCR1_RRDYEN);
++
++      ucr2 |= UCR2_RXEN;
++
++      writel(ucr1, sport->port.membase + UCR1);
++      writel(ucr2, sport->port.membase + UCR2);
++
++      /* now enable irqs */
++      writel(ucr1 | UCR1_RRDYEN, sport->port.membase + UCR1);
+       spin_unlock_irqrestore(&sport->port.lock, flags);
+@@ -1742,11 +1785,8 @@ static int imx_rs485_config(struct uart_
+       /* Make sure Rx is enabled in case Tx is active with Rx disabled */
+       if (!(rs485conf->flags & SER_RS485_ENABLED) ||
+-          rs485conf->flags & SER_RS485_RX_DURING_TX) {
+-              temp = readl(sport->port.membase + UCR2);
+-              temp |= UCR2_RXEN;
+-              writel(temp, sport->port.membase + UCR2);
+-      }
++          rs485conf->flags & SER_RS485_RX_DURING_TX)
++              imx_start_rx(port);
+       port->rs485 = *rs485conf;
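
The invariant the backport above enforces is that the RX interrupt and DMA enables in UCR1 are only set while the receiver (UCR2.RXEN) is on, which reduces to an ordering rule: enable the receiver before its interrupt sources, and mask the interrupt sources before disabling the receiver. A minimal sketch of that rule follows (illustrative only, using a plain struct instead of real MMIO registers; the bit names mirror the driver but the code is not part of the patch):

/* Illustrative only: the enable/disable ordering the imx patch establishes,
 * modelled with a fake register block instead of memory-mapped I/O. */
#include <stdint.h>
#include <stdio.h>

#define UCR1_RRDYEN   (1u << 9)   /* RX ready irq enable (PIO case) */
#define UCR1_RXDMAEN  (1u << 8)   /* RX DMA enable */
#define UCR2_RXEN     (1u << 1)   /* receiver enable */

struct fake_uart {
        uint32_t ucr1;
        uint32_t ucr2;
        int dma;
};

/* Enable the receiver before unmasking its irq/DMA sources, so no RX
 * event can be raised while the receiver is still off. */
static void start_rx(struct fake_uart *u)
{
        u->ucr2 |= UCR2_RXEN;                            /* receiver on first */
        u->ucr1 |= u->dma ? UCR1_RXDMAEN : UCR1_RRDYEN;  /* then its sources */
}

/* Mirror image: mask the irq/DMA sources first, then switch the
 * receiver off, so a pending event never finds RXEN cleared. */
static void stop_rx(struct fake_uart *u)
{
        u->ucr1 &= ~(u->dma ? UCR1_RXDMAEN : UCR1_RRDYEN);
        u->ucr2 &= ~UCR2_RXEN;
}

int main(void)
{
        struct fake_uart u = { .dma = 0 };

        start_rx(&u);
        printf("after start: UCR1=%#x UCR2=%#x\n",
               (unsigned)u.ucr1, (unsigned)u.ucr2);
        stop_rx(&u);
        printf("after stop:  UCR1=%#x UCR2=%#x\n",
               (unsigned)u.ucr1, (unsigned)u.ucr2);
        return 0;
}

imx_start_rx() and imx_stop_rx() in the patch apply exactly this ordering to the real UCR1/UCR2 registers.
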
diff --git a/queue-4.14/serial-imx-only-handle-irqs-that-are-actually-enabled.patch b/queue-4.14/serial-imx-only-handle-irqs-that-are-actually-enabled.patch
new file mode 100644 (file)
index 0000000..02fd18d
--- /dev/null
+++ b/queue-4.14/serial-imx-only-handle-irqs-that-are-actually-enabled.patch
@@ -0,0 +1,123 @@
+From 437768962f754d9501e5ba4d98b1f2a89dc62028 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= <u.kleine-koenig@pengutronix.de>
+Date: Sun, 18 Feb 2018 22:02:44 +0100
+Subject: serial: imx: Only handle irqs that are actually enabled
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+commit 437768962f754d9501e5ba4d98b1f2a89dc62028 upstream.
+
+Handling an irq that isn't enabled can have some undesired side effects.
+Some of these are mentioned in the newly introduced code comment. Some
+of the irq sources already had their handling right, some didn't. Handle
+them all in the same consistent way.
+
+The change for USR1_RRDY and USR1_AGTIM drops the check for
+dma_is_enabled. This is correct as UCR1_RRDYEN and UCR2_ATEN are always
+off if dma is enabled.
+
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Reviewed-by: Shawn Guo <shawnguo@kernel.org>
+[Backport to v4.14]
+Signed-off-by: Frieder Schrempf <frieder.schrempf@kontron.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/imx.c |   53 ++++++++++++++++++++++++++++++++++-------------
+ 1 file changed, 39 insertions(+), 14 deletions(-)
+
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -843,14 +843,42 @@ static void imx_mctrl_check(struct imx_p
+ static irqreturn_t imx_int(int irq, void *dev_id)
+ {
+       struct imx_port *sport = dev_id;
+-      unsigned int sts;
+-      unsigned int sts2;
++      unsigned int usr1, usr2, ucr1, ucr2, ucr3, ucr4;
+       irqreturn_t ret = IRQ_NONE;
+-      sts = readl(sport->port.membase + USR1);
+-      sts2 = readl(sport->port.membase + USR2);
++      usr1 = readl(sport->port.membase + USR1);
++      usr2 = readl(sport->port.membase + USR2);
++      ucr1 = readl(sport->port.membase + UCR1);
++      ucr2 = readl(sport->port.membase + UCR2);
++      ucr3 = readl(sport->port.membase + UCR3);
++      ucr4 = readl(sport->port.membase + UCR4);
++
++      /*
++       * Even if a condition is true that can trigger an irq only handle it if
++       * the respective irq source is enabled. This prevents some undesired
++       * actions, for example if a character that sits in the RX FIFO and that
++       * should be fetched via DMA is tried to be fetched using PIO. Or the
++       * receiver is currently off and so reading from URXD0 results in an
++       * exception. So just mask the (raw) status bits for disabled irqs.
++       */
++      if ((ucr1 & UCR1_RRDYEN) == 0)
++              usr1 &= ~USR1_RRDY;
++      if ((ucr2 & UCR2_ATEN) == 0)
++              usr1 &= ~USR1_AGTIM;
++      if ((ucr1 & UCR1_TXMPTYEN) == 0)
++              usr1 &= ~USR1_TRDY;
++      if ((ucr4 & UCR4_TCEN) == 0)
++              usr2 &= ~USR2_TXDC;
++      if ((ucr3 & UCR3_DTRDEN) == 0)
++              usr1 &= ~USR1_DTRD;
++      if ((ucr1 & UCR1_RTSDEN) == 0)
++              usr1 &= ~USR1_RTSD;
++      if ((ucr3 & UCR3_AWAKEN) == 0)
++              usr1 &= ~USR1_AWAKE;
++      if ((ucr4 & UCR4_OREN) == 0)
++              usr2 &= ~USR2_ORE;
+-      if (sts & (USR1_RRDY | USR1_AGTIM)) {
++      if (usr1 & (USR1_RRDY | USR1_AGTIM)) {
+               if (sport->dma_is_enabled)
+                       imx_dma_rxint(sport);
+               else
+@@ -858,18 +886,15 @@ static irqreturn_t imx_int(int irq, void
+               ret = IRQ_HANDLED;
+       }
+-      if ((sts & USR1_TRDY &&
+-           readl(sport->port.membase + UCR1) & UCR1_TXMPTYEN) ||
+-          (sts2 & USR2_TXDC &&
+-           readl(sport->port.membase + UCR4) & UCR4_TCEN)) {
++      if ((usr1 & USR1_TRDY) || (usr2 & USR2_TXDC)) {
+               imx_txint(irq, dev_id);
+               ret = IRQ_HANDLED;
+       }
+-      if (sts & USR1_DTRD) {
++      if (usr1 & USR1_DTRD) {
+               unsigned long flags;
+-              if (sts & USR1_DTRD)
++              if (usr1 & USR1_DTRD)
+                       writel(USR1_DTRD, sport->port.membase + USR1);
+               spin_lock_irqsave(&sport->port.lock, flags);
+@@ -879,17 +904,17 @@ static irqreturn_t imx_int(int irq, void
+               ret = IRQ_HANDLED;
+       }
+-      if (sts & USR1_RTSD) {
++      if (usr1 & USR1_RTSD) {
+               imx_rtsint(irq, dev_id);
+               ret = IRQ_HANDLED;
+       }
+-      if (sts & USR1_AWAKE) {
++      if (usr1 & USR1_AWAKE) {
+               writel(USR1_AWAKE, sport->port.membase + USR1);
+               ret = IRQ_HANDLED;
+       }
+-      if (sts2 & USR2_ORE) {
++      if (usr2 & USR2_ORE) {
+               sport->port.icount.overrun++;
+               writel(USR2_ORE, sport->port.membase + USR2);
+               ret = IRQ_HANDLED;
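
The core of this patch is the masking step at the top of imx_int(): a raw status bit is only acted on if its corresponding enable bit is set. A reduced, self-contained C sketch of that pattern follows (illustrative only; the status/enable bit names here are invented and do not match the i.MX registers):

/* Illustrative only: dispatch handlers only for irq sources that are both
 * pending in the status word and enabled in the control word. */
#include <stdint.h>
#include <stdio.h>

#define STS_RX_READY   (1u << 0)
#define STS_TX_DONE    (1u << 1)
#define EN_RX_READY    (1u << 0)
#define EN_TX_DONE     (1u << 1)

static int fake_isr(uint32_t status, uint32_t enabled)
{
        int handled = 0;

        /* Drop status bits whose irq source is masked: they may well be
         * set in hardware, but they did not cause this interrupt and the
         * handler for them may not be safe to run right now. */
        if (!(enabled & EN_RX_READY))
                status &= ~STS_RX_READY;
        if (!(enabled & EN_TX_DONE))
                status &= ~STS_TX_DONE;

        if (status & STS_RX_READY) {
                puts("rx handler");
                handled = 1;
        }
        if (status & STS_TX_DONE) {
                puts("tx handler");
                handled = 1;
        }
        return handled;
}

int main(void)
{
        /* RX data is pending in hardware but the RX irq is masked (e.g. the
         * receiver is handled by DMA): only the TX handler may run. */
        fake_isr(STS_RX_READY | STS_TX_DONE, EN_TX_DONE);
        return 0;
}

With RX data pending but the RX irq masked, only the TX handler fires, which is the behaviour the patch wants when reception is handled by DMA or the receiver is off.
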
diff --git a/queue-4.14/series b/queue-4.14/series
index e38390337eaf61a6bf4163a52a49803d71fdb732..11966cc1534fa1ce9ede8b0ec9a1f1e00901bdfa 100644 (file)
--- a/queue-4.14/series
+++ b/queue-4.14/series
@@ -16,3 +16,6 @@ btrfs-log-message-when-rw-remount-is-attempted-with-unclean-tree-log.patch
 arm64-ssbs-fix-context-switch-when-ssbs-is-present-on-all-cpus.patch
 kvm-nvmx-use-correct-root-level-for-nested-ept-shadow-page-tables.patch
 perf-x86-amd-add-missing-l2-misses-event-spec-to-amd-family-17h-s-event-map.patch
+padata-remove-broken-queue-flushing.patch
+serial-imx-ensure-that-rx-irqs-are-off-if-rx-is-off.patch
+serial-imx-only-handle-irqs-that-are-actually-enabled.patch