git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.14-stable patches
author: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 11 Dec 2023 12:56:05 +0000 (13:56 +0100)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 11 Dec 2023 12:56:05 +0000 (13:56 +0100)
added patches:
arm-pl011-fix-dma-support.patch
kvm-s390-mm-properly-reset-no-dat.patch
parport-add-support-for-brainboxes-ix-uc-px-parallel-cards.patch
serial-8250_omap-add-earlycon-support-for-the-am654-uart-controller.patch
serial-sc16is7xx-address-rx-timeout-interrupt-errata.patch

queue-4.14/arm-pl011-fix-dma-support.patch [new file with mode: 0644]
queue-4.14/kvm-s390-mm-properly-reset-no-dat.patch [new file with mode: 0644]
queue-4.14/parport-add-support-for-brainboxes-ix-uc-px-parallel-cards.patch [new file with mode: 0644]
queue-4.14/serial-8250_omap-add-earlycon-support-for-the-am654-uart-controller.patch [new file with mode: 0644]
queue-4.14/serial-sc16is7xx-address-rx-timeout-interrupt-errata.patch [new file with mode: 0644]
queue-4.14/series

diff --git a/queue-4.14/arm-pl011-fix-dma-support.patch b/queue-4.14/arm-pl011-fix-dma-support.patch
new file mode 100644 (file)
index 0000000..88a3b92
--- /dev/null
@@ -0,0 +1,335 @@
+From 58ac1b3799799069d53f5bf95c093f2fe8dd3cc5 Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Wed, 22 Nov 2023 18:15:03 +0100
+Subject: ARM: PL011: Fix DMA support
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit 58ac1b3799799069d53f5bf95c093f2fe8dd3cc5 upstream.
+
+Since there is no guarantee that the memory returned by
+dma_alloc_coherent() is associated with a 'struct page', using the
+architecture specific phys_to_page() is wrong, but using
+virt_to_page() would be as well.
+
+Stop using sg lists altogether and just use the *_single() functions
+instead. This also simplifies the code a bit since the scatterlists in
+this driver always have only one entry anyway.
+
+https://lore.kernel.org/lkml/86db0fe5-930d-4cbb-bd7d-03367da38951@app.fastmail.com/
+    Use consistent names for dma buffers
+
+gc: Add a commit log from the initial thread:
+https://lore.kernel.org/lkml/86db0fe5-930d-4cbb-bd7d-03367da38951@app.fastmail.com/
+    Use consistent names for dma buffers
+
+Fixes: cb06ff102e2d7 ("ARM: PL011: Add support for Rx DMA buffer polling.")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Tested-by: Gregory CLEMENT <gregory.clement@bootlin.com>
+Signed-off-by: Gregory CLEMENT <gregory.clement@bootlin.com>
+Cc: stable <stable@kernel.org>
+Link: https://lore.kernel.org/r/20231122171503.235649-1-gregory.clement@bootlin.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/amba-pl011.c |  112 +++++++++++++++++++---------------------
+ 1 file changed, 54 insertions(+), 58 deletions(-)
+
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -240,17 +240,18 @@ static struct vendor_data vendor_zte = {
+ /* Deals with DMA transactions */
+-struct pl011_sgbuf {
+-      struct scatterlist sg;
+-      char *buf;
++struct pl011_dmabuf {
++      dma_addr_t              dma;
++      size_t                  len;
++      char                    *buf;
+ };
+ struct pl011_dmarx_data {
+       struct dma_chan         *chan;
+       struct completion       complete;
+       bool                    use_buf_b;
+-      struct pl011_sgbuf      sgbuf_a;
+-      struct pl011_sgbuf      sgbuf_b;
++      struct pl011_dmabuf     dbuf_a;
++      struct pl011_dmabuf     dbuf_b;
+       dma_cookie_t            cookie;
+       bool                    running;
+       struct timer_list       timer;
+@@ -263,7 +264,8 @@ struct pl011_dmarx_data {
+ struct pl011_dmatx_data {
+       struct dma_chan         *chan;
+-      struct scatterlist      sg;
++      dma_addr_t              dma;
++      size_t                  len;
+       char                    *buf;
+       bool                    queued;
+ };
+@@ -384,32 +386,24 @@ static int pl011_fifo_to_tty(struct uart
+ #define PL011_DMA_BUFFER_SIZE PAGE_SIZE
+-static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
++static int pl011_dmabuf_init(struct dma_chan *chan, struct pl011_dmabuf *db,
+       enum dma_data_direction dir)
+ {
+-      dma_addr_t dma_addr;
+-
+-      sg->buf = dma_alloc_coherent(chan->device->dev,
+-              PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
+-      if (!sg->buf)
++      db->buf = dma_alloc_coherent(chan->device->dev, PL011_DMA_BUFFER_SIZE,
++                                   &db->dma, GFP_KERNEL);
++      if (!db->buf)
+               return -ENOMEM;
+-
+-      sg_init_table(&sg->sg, 1);
+-      sg_set_page(&sg->sg, phys_to_page(dma_addr),
+-              PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
+-      sg_dma_address(&sg->sg) = dma_addr;
+-      sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;
++      db->len = PL011_DMA_BUFFER_SIZE;
+       return 0;
+ }
+-static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
++static void pl011_dmabuf_free(struct dma_chan *chan, struct pl011_dmabuf *db,
+       enum dma_data_direction dir)
+ {
+-      if (sg->buf) {
++      if (db->buf) {
+               dma_free_coherent(chan->device->dev,
+-                      PL011_DMA_BUFFER_SIZE, sg->buf,
+-                      sg_dma_address(&sg->sg));
++                                PL011_DMA_BUFFER_SIZE, db->buf, db->dma);
+       }
+ }
+@@ -570,8 +564,8 @@ static void pl011_dma_tx_callback(void *
+       spin_lock_irqsave(&uap->port.lock, flags);
+       if (uap->dmatx.queued)
+-              dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
+-                           DMA_TO_DEVICE);
++              dma_unmap_single(dmatx->chan->device->dev, dmatx->dma,
++                              dmatx->len, DMA_TO_DEVICE);
+       dmacr = uap->dmacr;
+       uap->dmacr = dmacr & ~UART011_TXDMAE;
+@@ -657,18 +651,19 @@ static int pl011_dma_tx_refill(struct ua
+                       memcpy(&dmatx->buf[first], &xmit->buf[0], second);
+       }
+-      dmatx->sg.length = count;
+-
+-      if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
++      dmatx->len = count;
++      dmatx->dma = dma_map_single(dma_dev->dev, dmatx->buf, count,
++                                  DMA_TO_DEVICE);
++      if (dmatx->dma == DMA_MAPPING_ERROR) {
+               uap->dmatx.queued = false;
+               dev_dbg(uap->port.dev, "unable to map TX DMA\n");
+               return -EBUSY;
+       }
+-      desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
++      desc = dmaengine_prep_slave_single(chan, dmatx->dma, dmatx->len, DMA_MEM_TO_DEV,
+                                            DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+       if (!desc) {
+-              dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
++              dma_unmap_single(dma_dev->dev, dmatx->dma, dmatx->len, DMA_TO_DEVICE);
+               uap->dmatx.queued = false;
+               /*
+                * If DMA cannot be used right now, we complete this
+@@ -832,8 +827,8 @@ __acquires(&uap->port.lock)
+       dmaengine_terminate_async(uap->dmatx.chan);
+       if (uap->dmatx.queued) {
+-              dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
+-                           DMA_TO_DEVICE);
++              dma_unmap_single(uap->dmatx.chan->device->dev, uap->dmatx.dma,
++                               uap->dmatx.len, DMA_TO_DEVICE);
+               uap->dmatx.queued = false;
+               uap->dmacr &= ~UART011_TXDMAE;
+               pl011_write(uap->dmacr, uap, REG_DMACR);
+@@ -847,15 +842,15 @@ static int pl011_dma_rx_trigger_dma(stru
+       struct dma_chan *rxchan = uap->dmarx.chan;
+       struct pl011_dmarx_data *dmarx = &uap->dmarx;
+       struct dma_async_tx_descriptor *desc;
+-      struct pl011_sgbuf *sgbuf;
++      struct pl011_dmabuf *dbuf;
+       if (!rxchan)
+               return -EIO;
+       /* Start the RX DMA job */
+-      sgbuf = uap->dmarx.use_buf_b ?
+-              &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+-      desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
++      dbuf = uap->dmarx.use_buf_b ?
++              &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
++      desc = dmaengine_prep_slave_single(rxchan, dbuf->dma, dbuf->len,
+                                       DMA_DEV_TO_MEM,
+                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+       /*
+@@ -895,8 +890,8 @@ static void pl011_dma_rx_chars(struct ua
+                              bool readfifo)
+ {
+       struct tty_port *port = &uap->port.state->port;
+-      struct pl011_sgbuf *sgbuf = use_buf_b ?
+-              &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
++      struct pl011_dmabuf *dbuf = use_buf_b ?
++              &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
+       int dma_count = 0;
+       u32 fifotaken = 0; /* only used for vdbg() */
+@@ -905,7 +900,7 @@ static void pl011_dma_rx_chars(struct ua
+       if (uap->dmarx.poll_rate) {
+               /* The data can be taken by polling */
+-              dmataken = sgbuf->sg.length - dmarx->last_residue;
++              dmataken = dbuf->len - dmarx->last_residue;
+               /* Recalculate the pending size */
+               if (pending >= dmataken)
+                       pending -= dmataken;
+@@ -919,7 +914,7 @@ static void pl011_dma_rx_chars(struct ua
+                * Note that tty_insert_flip_buf() tries to take as many chars
+                * as it can.
+                */
+-              dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
++              dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
+                               pending);
+               uap->port.icount.rx += dma_count;
+@@ -930,7 +925,7 @@ static void pl011_dma_rx_chars(struct ua
+       /* Reset the last_residue for Rx DMA poll */
+       if (uap->dmarx.poll_rate)
+-              dmarx->last_residue = sgbuf->sg.length;
++              dmarx->last_residue = dbuf->len;
+       /*
+        * Only continue with trying to read the FIFO if all DMA chars have
+@@ -967,8 +962,8 @@ static void pl011_dma_rx_irq(struct uart
+ {
+       struct pl011_dmarx_data *dmarx = &uap->dmarx;
+       struct dma_chan *rxchan = dmarx->chan;
+-      struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
+-              &dmarx->sgbuf_b : &dmarx->sgbuf_a;
++      struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
++              &dmarx->dbuf_b : &dmarx->dbuf_a;
+       size_t pending;
+       struct dma_tx_state state;
+       enum dma_status dmastat;
+@@ -990,7 +985,7 @@ static void pl011_dma_rx_irq(struct uart
+       pl011_write(uap->dmacr, uap, REG_DMACR);
+       uap->dmarx.running = false;
+-      pending = sgbuf->sg.length - state.residue;
++      pending = dbuf->len - state.residue;
+       BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
+       /* Then we terminate the transfer - we now know our residue */
+       dmaengine_terminate_all(rxchan);
+@@ -1017,8 +1012,8 @@ static void pl011_dma_rx_callback(void *
+       struct pl011_dmarx_data *dmarx = &uap->dmarx;
+       struct dma_chan *rxchan = dmarx->chan;
+       bool lastbuf = dmarx->use_buf_b;
+-      struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
+-              &dmarx->sgbuf_b : &dmarx->sgbuf_a;
++      struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
++              &dmarx->dbuf_b : &dmarx->dbuf_a;
+       size_t pending;
+       struct dma_tx_state state;
+       int ret;
+@@ -1036,7 +1031,7 @@ static void pl011_dma_rx_callback(void *
+        * the DMA irq handler. So we check the residue here.
+        */
+       rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
+-      pending = sgbuf->sg.length - state.residue;
++      pending = dbuf->len - state.residue;
+       BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
+       /* Then we terminate the transfer - we now know our residue */
+       dmaengine_terminate_all(rxchan);
+@@ -1088,16 +1083,16 @@ static void pl011_dma_rx_poll(unsigned l
+       unsigned long flags = 0;
+       unsigned int dmataken = 0;
+       unsigned int size = 0;
+-      struct pl011_sgbuf *sgbuf;
++      struct pl011_dmabuf *dbuf;
+       int dma_count;
+       struct dma_tx_state state;
+-      sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
++      dbuf = dmarx->use_buf_b ? &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
+       rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
+       if (likely(state.residue < dmarx->last_residue)) {
+-              dmataken = sgbuf->sg.length - dmarx->last_residue;
++              dmataken = dbuf->len - dmarx->last_residue;
+               size = dmarx->last_residue - state.residue;
+-              dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
++              dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
+                               size);
+               if (dma_count == size)
+                       dmarx->last_residue =  state.residue;
+@@ -1144,7 +1139,7 @@ static void pl011_dma_startup(struct uar
+               return;
+       }
+-      sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);
++      uap->dmatx.len = PL011_DMA_BUFFER_SIZE;
+       /* The DMA buffer is now the FIFO the TTY subsystem can use */
+       uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
+@@ -1154,7 +1149,7 @@ static void pl011_dma_startup(struct uar
+               goto skip_rx;
+       /* Allocate and map DMA RX buffers */
+-      ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
++      ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_a,
+                              DMA_FROM_DEVICE);
+       if (ret) {
+               dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
+@@ -1162,12 +1157,12 @@ static void pl011_dma_startup(struct uar
+               goto skip_rx;
+       }
+-      ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
++      ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_b,
+                              DMA_FROM_DEVICE);
+       if (ret) {
+               dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
+                       "RX buffer B", ret);
+-              pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
++              pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a,
+                                DMA_FROM_DEVICE);
+               goto skip_rx;
+       }
+@@ -1223,8 +1218,9 @@ static void pl011_dma_shutdown(struct ua
+               /* In theory, this should already be done by pl011_dma_flush_buffer */
+               dmaengine_terminate_all(uap->dmatx.chan);
+               if (uap->dmatx.queued) {
+-                      dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
+-                                   DMA_TO_DEVICE);
++                      dma_unmap_single(uap->dmatx.chan->device->dev,
++                                       uap->dmatx.dma, uap->dmatx.len,
++                                       DMA_TO_DEVICE);
+                       uap->dmatx.queued = false;
+               }
+@@ -1235,8 +1231,8 @@ static void pl011_dma_shutdown(struct ua
+       if (uap->using_rx_dma) {
+               dmaengine_terminate_all(uap->dmarx.chan);
+               /* Clean up the RX DMA */
+-              pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
+-              pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
++              pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a, DMA_FROM_DEVICE);
++              pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_b, DMA_FROM_DEVICE);
+               if (uap->dmarx.poll_rate)
+                       del_timer_sync(&uap->dmarx.timer);
+               uap->using_rx_dma = false;
diff --git a/queue-4.14/kvm-s390-mm-properly-reset-no-dat.patch b/queue-4.14/kvm-s390-mm-properly-reset-no-dat.patch
new file mode 100644 (file)
index 0000000..ecd6da5
--- /dev/null
@@ -0,0 +1,33 @@
+From 27072b8e18a73ffeffb1c140939023915a35134b Mon Sep 17 00:00:00 2001
+From: Claudio Imbrenda <imbrenda@linux.ibm.com>
+Date: Thu, 9 Nov 2023 13:36:24 +0100
+Subject: KVM: s390/mm: Properly reset no-dat
+
+From: Claudio Imbrenda <imbrenda@linux.ibm.com>
+
+commit 27072b8e18a73ffeffb1c140939023915a35134b upstream.
+
+When the CMMA state needs to be reset, the no-dat bit also needs to be
+reset. Failure to do so could cause issues in the guest, since the
+guest expects the bit to be cleared after a reset.
+
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Nico Boehr <nrb@linux.ibm.com>
+Message-ID: <20231109123624.37314-1-imbrenda@linux.ibm.com>
+Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/mm/pgtable.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/s390/mm/pgtable.c
++++ b/arch/s390/mm/pgtable.c
+@@ -667,7 +667,7 @@ void ptep_zap_unused(struct mm_struct *m
+               pte_clear(mm, addr, ptep);
+       }
+       if (reset)
+-              pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
++              pgste_val(pgste) &= ~(_PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT);
+       pgste_set_unlock(ptep, pgste);
+       preempt_enable();
+ }
diff --git a/queue-4.14/parport-add-support-for-brainboxes-ix-uc-px-parallel-cards.patch b/queue-4.14/parport-add-support-for-brainboxes-ix-uc-px-parallel-cards.patch
new file mode 100644 (file)
index 0000000..62e462c
--- /dev/null
@@ -0,0 +1,65 @@
+From 1a031f6edc460e9562098bdedc3918da07c30a6e Mon Sep 17 00:00:00 2001
+From: Cameron Williams <cang1@live.co.uk>
+Date: Thu, 2 Nov 2023 21:10:40 +0000
+Subject: parport: Add support for Brainboxes IX/UC/PX parallel cards
+
+From: Cameron Williams <cang1@live.co.uk>
+
+commit 1a031f6edc460e9562098bdedc3918da07c30a6e upstream.
+
+Adds support for Intashield IX-500/IX-550, UC-146/UC-157, PX-146/PX-157,
+PX-203 and PX-475 (LPT port)
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Cameron Williams <cang1@live.co.uk>
+Acked-by: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
+Link: https://lore.kernel.org/r/AS4PR02MB790389C130410BD864C8DCC9C4A6A@AS4PR02MB7903.eurprd02.prod.outlook.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/parport/parport_pc.c |   21 +++++++++++++++++++++
+ 1 file changed, 21 insertions(+)
+
+--- a/drivers/parport/parport_pc.c
++++ b/drivers/parport/parport_pc.c
+@@ -2647,6 +2647,8 @@ enum parport_pc_pci_cards {
+       netmos_9865,
+       quatech_sppxp100,
+       wch_ch382l,
++      brainboxes_uc146,
++      brainboxes_px203,
+ };
+@@ -2710,6 +2712,8 @@ static struct parport_pc_pci {
+       /* netmos_9865 */               { 1, { { 0, -1 }, } },
+       /* quatech_sppxp100 */          { 1, { { 0, 1 }, } },
+       /* wch_ch382l */                { 1, { { 2, -1 }, } },
++      /* brainboxes_uc146 */  { 1, { { 3, -1 }, } },
++      /* brainboxes_px203 */  { 1, { { 0, -1 }, } },
+ };
+ static const struct pci_device_id parport_pc_pci_tbl[] = {
+@@ -2801,6 +2805,23 @@ static const struct pci_device_id parpor
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 },
+       /* WCH CH382L PCI-E single parallel port card */
+       { 0x1c00, 0x3050, 0x1c00, 0x3050, 0, 0, wch_ch382l },
++      /* Brainboxes IX-500/550 */
++      { PCI_VENDOR_ID_INTASHIELD, 0x402a,
++        PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
++      /* Brainboxes UC-146/UC-157 */
++      { PCI_VENDOR_ID_INTASHIELD, 0x0be1,
++        PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc146 },
++      { PCI_VENDOR_ID_INTASHIELD, 0x0be2,
++        PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc146 },
++      /* Brainboxes PX-146/PX-257 */
++      { PCI_VENDOR_ID_INTASHIELD, 0x401c,
++        PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
++      /* Brainboxes PX-203 */
++      { PCI_VENDOR_ID_INTASHIELD, 0x4007,
++        PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_px203 },
++      /* Brainboxes PX-475 */
++      { PCI_VENDOR_ID_INTASHIELD, 0x401f,
++        PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+       { 0, } /* terminate list */
+ };
+ MODULE_DEVICE_TABLE(pci, parport_pc_pci_tbl);
diff --git a/queue-4.14/serial-8250_omap-add-earlycon-support-for-the-am654-uart-controller.patch b/queue-4.14/serial-8250_omap-add-earlycon-support-for-the-am654-uart-controller.patch
new file mode 100644 (file)
index 0000000..0d46574
--- /dev/null
@@ -0,0 +1,31 @@
+From 8e42c301ce64e0dcca547626eb486877d502d336 Mon Sep 17 00:00:00 2001
+From: Ronald Wahl <ronald.wahl@raritan.com>
+Date: Tue, 31 Oct 2023 14:12:42 +0100
+Subject: serial: 8250_omap: Add earlycon support for the AM654 UART controller
+
+From: Ronald Wahl <ronald.wahl@raritan.com>
+
+commit 8e42c301ce64e0dcca547626eb486877d502d336 upstream.
+
+Currently there is no support for earlycon on the AM654 UART
+controller. This commit adds it.
+
+Signed-off-by: Ronald Wahl <ronald.wahl@raritan.com>
+Reviewed-by: Vignesh Raghavendra <vigneshr@ti.com>
+Link: https://lore.kernel.org/r/20231031131242.15516-1-rwahl@gmx.de
+Cc: stable <stable@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/8250/8250_early.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/tty/serial/8250/8250_early.c
++++ b/drivers/tty/serial/8250/8250_early.c
+@@ -179,6 +179,7 @@ static int __init early_omap8250_setup(s
+ OF_EARLYCON_DECLARE(omap8250, "ti,omap2-uart", early_omap8250_setup);
+ OF_EARLYCON_DECLARE(omap8250, "ti,omap3-uart", early_omap8250_setup);
+ OF_EARLYCON_DECLARE(omap8250, "ti,omap4-uart", early_omap8250_setup);
++OF_EARLYCON_DECLARE(omap8250, "ti,am654-uart", early_omap8250_setup);
+ #endif
diff --git a/queue-4.14/serial-sc16is7xx-address-rx-timeout-interrupt-errata.patch b/queue-4.14/serial-sc16is7xx-address-rx-timeout-interrupt-errata.patch
new file mode 100644 (file)
index 0000000..76bacad
--- /dev/null
@@ -0,0 +1,68 @@
+From 08ce9a1b72e38cf44c300a44ac5858533eb3c860 Mon Sep 17 00:00:00 2001
+From: Daniel Mack <daniel@zonque.org>
+Date: Thu, 23 Nov 2023 08:28:18 +0100
+Subject: serial: sc16is7xx: address RX timeout interrupt errata
+
+From: Daniel Mack <daniel@zonque.org>
+
+commit 08ce9a1b72e38cf44c300a44ac5858533eb3c860 upstream.
+
+This device has a silicon bug that makes it report a timeout interrupt
+but no data in the FIFO.
+
+The datasheet states the following in the errata section 18.1.4:
+
+  "If the host reads the receive FIFO at the same time as a
+  time-out interrupt condition happens, the host might read 0xCC
+  (time-out) in the Interrupt Indication Register (IIR), but bit 0
+  of the Line Status Register (LSR) is not set (means there is no
+  data in the receive FIFO)."
+
+The errata description seems to indicate it concerns only polled mode of
+operation when reading bit 0 of the LSR register. However, tests have
+shown and NXP has confirmed that the RXLVL register also yields 0 when
+the bug is triggered, and hence the IRQ driven implementation in this
+driver is equally affected.
+
+This bug has hit us on production units and when it does, sc16is7xx_irq()
+would spin forever because sc16is7xx_port_irq() keeps seeing an
+interrupt in the IIR register that is not cleared because the driver
+does not call into sc16is7xx_handle_rx() unless the RXLVL register
+reports at least one byte in the FIFO.
+
+Fix this by always reading one byte from the FIFO when this condition
+is detected in order to clear the interrupt. This approach was
+confirmed to be correct by NXP through their support channels.
+
+Tested by: Hugo Villeneuve <hvilleneuve@dimonoff.com>
+
+Signed-off-by: Daniel Mack <daniel@zonque.org>
+Co-Developed-by: Maxim Popov <maxim.snafu@gmail.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20231123072818.1394539-1-daniel@zonque.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/sc16is7xx.c |   12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -699,6 +699,18 @@ static bool sc16is7xx_port_irq(struct sc
+               case SC16IS7XX_IIR_RTOI_SRC:
+               case SC16IS7XX_IIR_XOFFI_SRC:
+                       rxlen = sc16is7xx_port_read(port, SC16IS7XX_RXLVL_REG);
++
++                      /*
++                       * There is a silicon bug that makes the chip report a
++                       * time-out interrupt but no data in the FIFO. This is
++                       * described in errata section 18.1.4.
++                       *
++                       * When this happens, read one byte from the FIFO to
++                       * clear the interrupt.
++                       */
++                      if (iir == SC16IS7XX_IIR_RTOI_SRC && !rxlen)
++                              rxlen = 1;
++
+                       if (rxlen)
+                               sc16is7xx_handle_rx(port, rxlen, iir);
+                       break;
index e11af40c116e75aae95dd18702be1b7c12b0aeca..afe8660c5dad40a2dbe206f26a5677639f9ed8c0 100644 (file)
@@ -14,3 +14,8 @@ tracing-always-update-snapshot-buffer-size.patch
 tracing-fix-incomplete-locking-when-disabling-buffered-events.patch
 tracing-fix-a-possible-race-when-disabling-buffered-events.patch
 packet-move-reference-count-in-packet_sock-to-atomic_long_t.patch
+parport-add-support-for-brainboxes-ix-uc-px-parallel-cards.patch
+arm-pl011-fix-dma-support.patch
+serial-sc16is7xx-address-rx-timeout-interrupt-errata.patch
+serial-8250_omap-add-earlycon-support-for-the-am654-uart-controller.patch
+kvm-s390-mm-properly-reset-no-dat.patch