git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
drop arm-pl011-fix-dma-support.patch from 4.14 and 4.19
author	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 12 Dec 2023 10:40:51 +0000 (11:40 +0100)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 12 Dec 2023 10:40:51 +0000 (11:40 +0100)
queue-4.14/arm-pl011-fix-dma-support.patch [deleted file]
queue-4.14/series
queue-4.19/arm-pl011-fix-dma-support.patch [deleted file]
queue-4.19/series

diff --git a/queue-4.14/arm-pl011-fix-dma-support.patch b/queue-4.14/arm-pl011-fix-dma-support.patch
deleted file mode 100644
index 88a3b92..0000000
--- a/queue-4.14/arm-pl011-fix-dma-support.patch
+++ /dev/null
@@ -1,335 +0,0 @@
-From 58ac1b3799799069d53f5bf95c093f2fe8dd3cc5 Mon Sep 17 00:00:00 2001
-From: Arnd Bergmann <arnd@arndb.de>
-Date: Wed, 22 Nov 2023 18:15:03 +0100
-Subject: ARM: PL011: Fix DMA support
-
-From: Arnd Bergmann <arnd@arndb.de>
-
-commit 58ac1b3799799069d53f5bf95c093f2fe8dd3cc5 upstream.
-
-Since there is no guarantee that the memory returned by
-dma_alloc_coherent() is associated with a 'struct page', using the
-architecture specific phys_to_page() is wrong, but using
-virt_to_page() would be as well.
-
-Stop using sg lists altogether and just use the *_single() functions
-instead. This also simplifies the code a bit since the scatterlists in
-this driver always have only one entry anyway.
-
-https://lore.kernel.org/lkml/86db0fe5-930d-4cbb-bd7d-03367da38951@app.fastmail.com/
-    Use consistent names for dma buffers
-
-gc: Add a commit log from the initial thread:
-https://lore.kernel.org/lkml/86db0fe5-930d-4cbb-bd7d-03367da38951@app.fastmail.com/
-    Use consistent names for dma buffers
-
-Fixes: cb06ff102e2d7 ("ARM: PL011: Add support for Rx DMA buffer polling.")
-Signed-off-by: Arnd Bergmann <arnd@arndb.de>
-Tested-by: Gregory CLEMENT <gregory.clement@bootlin.com>
-Signed-off-by: Gregory CLEMENT <gregory.clement@bootlin.com>
-Cc: stable <stable@kernel.org>
-Link: https://lore.kernel.org/r/20231122171503.235649-1-gregory.clement@bootlin.com
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- drivers/tty/serial/amba-pl011.c |  112 +++++++++++++++++++---------------------
- 1 file changed, 54 insertions(+), 58 deletions(-)
-
---- a/drivers/tty/serial/amba-pl011.c
-+++ b/drivers/tty/serial/amba-pl011.c
-@@ -240,17 +240,18 @@ static struct vendor_data vendor_zte = {
- /* Deals with DMA transactions */
--struct pl011_sgbuf {
--      struct scatterlist sg;
--      char *buf;
-+struct pl011_dmabuf {
-+      dma_addr_t              dma;
-+      size_t                  len;
-+      char                    *buf;
- };
- struct pl011_dmarx_data {
-       struct dma_chan         *chan;
-       struct completion       complete;
-       bool                    use_buf_b;
--      struct pl011_sgbuf      sgbuf_a;
--      struct pl011_sgbuf      sgbuf_b;
-+      struct pl011_dmabuf     dbuf_a;
-+      struct pl011_dmabuf     dbuf_b;
-       dma_cookie_t            cookie;
-       bool                    running;
-       struct timer_list       timer;
-@@ -263,7 +264,8 @@ struct pl011_dmarx_data {
- struct pl011_dmatx_data {
-       struct dma_chan         *chan;
--      struct scatterlist      sg;
-+      dma_addr_t              dma;
-+      size_t                  len;
-       char                    *buf;
-       bool                    queued;
- };
-@@ -384,32 +386,24 @@ static int pl011_fifo_to_tty(struct uart
- #define PL011_DMA_BUFFER_SIZE PAGE_SIZE
--static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
-+static int pl011_dmabuf_init(struct dma_chan *chan, struct pl011_dmabuf *db,
-       enum dma_data_direction dir)
- {
--      dma_addr_t dma_addr;
--
--      sg->buf = dma_alloc_coherent(chan->device->dev,
--              PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
--      if (!sg->buf)
-+      db->buf = dma_alloc_coherent(chan->device->dev, PL011_DMA_BUFFER_SIZE,
-+                                   &db->dma, GFP_KERNEL);
-+      if (!db->buf)
-               return -ENOMEM;
--
--      sg_init_table(&sg->sg, 1);
--      sg_set_page(&sg->sg, phys_to_page(dma_addr),
--              PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
--      sg_dma_address(&sg->sg) = dma_addr;
--      sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;
-+      db->len = PL011_DMA_BUFFER_SIZE;
-       return 0;
- }
--static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
-+static void pl011_dmabuf_free(struct dma_chan *chan, struct pl011_dmabuf *db,
-       enum dma_data_direction dir)
- {
--      if (sg->buf) {
-+      if (db->buf) {
-               dma_free_coherent(chan->device->dev,
--                      PL011_DMA_BUFFER_SIZE, sg->buf,
--                      sg_dma_address(&sg->sg));
-+                                PL011_DMA_BUFFER_SIZE, db->buf, db->dma);
-       }
- }
-@@ -570,8 +564,8 @@ static void pl011_dma_tx_callback(void *
-       spin_lock_irqsave(&uap->port.lock, flags);
-       if (uap->dmatx.queued)
--              dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
--                           DMA_TO_DEVICE);
-+              dma_unmap_single(dmatx->chan->device->dev, dmatx->dma,
-+                              dmatx->len, DMA_TO_DEVICE);
-       dmacr = uap->dmacr;
-       uap->dmacr = dmacr & ~UART011_TXDMAE;
-@@ -657,18 +651,19 @@ static int pl011_dma_tx_refill(struct ua
-                       memcpy(&dmatx->buf[first], &xmit->buf[0], second);
-       }
--      dmatx->sg.length = count;
--
--      if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
-+      dmatx->len = count;
-+      dmatx->dma = dma_map_single(dma_dev->dev, dmatx->buf, count,
-+                                  DMA_TO_DEVICE);
-+      if (dmatx->dma == DMA_MAPPING_ERROR) {
-               uap->dmatx.queued = false;
-               dev_dbg(uap->port.dev, "unable to map TX DMA\n");
-               return -EBUSY;
-       }
--      desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
-+      desc = dmaengine_prep_slave_single(chan, dmatx->dma, dmatx->len, DMA_MEM_TO_DEV,
-                                            DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-       if (!desc) {
--              dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
-+              dma_unmap_single(dma_dev->dev, dmatx->dma, dmatx->len, DMA_TO_DEVICE);
-               uap->dmatx.queued = false;
-               /*
-                * If DMA cannot be used right now, we complete this
-@@ -832,8 +827,8 @@ __acquires(&uap->port.lock)
-       dmaengine_terminate_async(uap->dmatx.chan);
-       if (uap->dmatx.queued) {
--              dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
--                           DMA_TO_DEVICE);
-+              dma_unmap_single(uap->dmatx.chan->device->dev, uap->dmatx.dma,
-+                               uap->dmatx.len, DMA_TO_DEVICE);
-               uap->dmatx.queued = false;
-               uap->dmacr &= ~UART011_TXDMAE;
-               pl011_write(uap->dmacr, uap, REG_DMACR);
-@@ -847,15 +842,15 @@ static int pl011_dma_rx_trigger_dma(stru
-       struct dma_chan *rxchan = uap->dmarx.chan;
-       struct pl011_dmarx_data *dmarx = &uap->dmarx;
-       struct dma_async_tx_descriptor *desc;
--      struct pl011_sgbuf *sgbuf;
-+      struct pl011_dmabuf *dbuf;
-       if (!rxchan)
-               return -EIO;
-       /* Start the RX DMA job */
--      sgbuf = uap->dmarx.use_buf_b ?
--              &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
--      desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
-+      dbuf = uap->dmarx.use_buf_b ?
-+              &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
-+      desc = dmaengine_prep_slave_single(rxchan, dbuf->dma, dbuf->len,
-                                       DMA_DEV_TO_MEM,
-                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-       /*
-@@ -895,8 +890,8 @@ static void pl011_dma_rx_chars(struct ua
-                              bool readfifo)
- {
-       struct tty_port *port = &uap->port.state->port;
--      struct pl011_sgbuf *sgbuf = use_buf_b ?
--              &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
-+      struct pl011_dmabuf *dbuf = use_buf_b ?
-+              &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
-       int dma_count = 0;
-       u32 fifotaken = 0; /* only used for vdbg() */
-@@ -905,7 +900,7 @@ static void pl011_dma_rx_chars(struct ua
-       if (uap->dmarx.poll_rate) {
-               /* The data can be taken by polling */
--              dmataken = sgbuf->sg.length - dmarx->last_residue;
-+              dmataken = dbuf->len - dmarx->last_residue;
-               /* Recalculate the pending size */
-               if (pending >= dmataken)
-                       pending -= dmataken;
-@@ -919,7 +914,7 @@ static void pl011_dma_rx_chars(struct ua
-                * Note that tty_insert_flip_buf() tries to take as many chars
-                * as it can.
-                */
--              dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
-+              dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
-                               pending);
-               uap->port.icount.rx += dma_count;
-@@ -930,7 +925,7 @@ static void pl011_dma_rx_chars(struct ua
-       /* Reset the last_residue for Rx DMA poll */
-       if (uap->dmarx.poll_rate)
--              dmarx->last_residue = sgbuf->sg.length;
-+              dmarx->last_residue = dbuf->len;
-       /*
-        * Only continue with trying to read the FIFO if all DMA chars have
-@@ -967,8 +962,8 @@ static void pl011_dma_rx_irq(struct uart
- {
-       struct pl011_dmarx_data *dmarx = &uap->dmarx;
-       struct dma_chan *rxchan = dmarx->chan;
--      struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
--              &dmarx->sgbuf_b : &dmarx->sgbuf_a;
-+      struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
-+              &dmarx->dbuf_b : &dmarx->dbuf_a;
-       size_t pending;
-       struct dma_tx_state state;
-       enum dma_status dmastat;
-@@ -990,7 +985,7 @@ static void pl011_dma_rx_irq(struct uart
-       pl011_write(uap->dmacr, uap, REG_DMACR);
-       uap->dmarx.running = false;
--      pending = sgbuf->sg.length - state.residue;
-+      pending = dbuf->len - state.residue;
-       BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
-       /* Then we terminate the transfer - we now know our residue */
-       dmaengine_terminate_all(rxchan);
-@@ -1017,8 +1012,8 @@ static void pl011_dma_rx_callback(void *
-       struct pl011_dmarx_data *dmarx = &uap->dmarx;
-       struct dma_chan *rxchan = dmarx->chan;
-       bool lastbuf = dmarx->use_buf_b;
--      struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
--              &dmarx->sgbuf_b : &dmarx->sgbuf_a;
-+      struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
-+              &dmarx->dbuf_b : &dmarx->dbuf_a;
-       size_t pending;
-       struct dma_tx_state state;
-       int ret;
-@@ -1036,7 +1031,7 @@ static void pl011_dma_rx_callback(void *
-        * the DMA irq handler. So we check the residue here.
-        */
-       rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
--      pending = sgbuf->sg.length - state.residue;
-+      pending = dbuf->len - state.residue;
-       BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
-       /* Then we terminate the transfer - we now know our residue */
-       dmaengine_terminate_all(rxchan);
-@@ -1088,16 +1083,16 @@ static void pl011_dma_rx_poll(unsigned l
-       unsigned long flags = 0;
-       unsigned int dmataken = 0;
-       unsigned int size = 0;
--      struct pl011_sgbuf *sgbuf;
-+      struct pl011_dmabuf *dbuf;
-       int dma_count;
-       struct dma_tx_state state;
--      sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
-+      dbuf = dmarx->use_buf_b ? &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
-       rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
-       if (likely(state.residue < dmarx->last_residue)) {
--              dmataken = sgbuf->sg.length - dmarx->last_residue;
-+              dmataken = dbuf->len - dmarx->last_residue;
-               size = dmarx->last_residue - state.residue;
--              dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
-+              dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
-                               size);
-               if (dma_count == size)
-                       dmarx->last_residue =  state.residue;
-@@ -1144,7 +1139,7 @@ static void pl011_dma_startup(struct uar
-               return;
-       }
--      sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);
-+      uap->dmatx.len = PL011_DMA_BUFFER_SIZE;
-       /* The DMA buffer is now the FIFO the TTY subsystem can use */
-       uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
-@@ -1154,7 +1149,7 @@ static void pl011_dma_startup(struct uar
-               goto skip_rx;
-       /* Allocate and map DMA RX buffers */
--      ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
-+      ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_a,
-                              DMA_FROM_DEVICE);
-       if (ret) {
-               dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
-@@ -1162,12 +1157,12 @@ static void pl011_dma_startup(struct uar
-               goto skip_rx;
-       }
--      ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
-+      ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_b,
-                              DMA_FROM_DEVICE);
-       if (ret) {
-               dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
-                       "RX buffer B", ret);
--              pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
-+              pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a,
-                                DMA_FROM_DEVICE);
-               goto skip_rx;
-       }
-@@ -1223,8 +1218,9 @@ static void pl011_dma_shutdown(struct ua
-               /* In theory, this should already be done by pl011_dma_flush_buffer */
-               dmaengine_terminate_all(uap->dmatx.chan);
-               if (uap->dmatx.queued) {
--                      dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
--                                   DMA_TO_DEVICE);
-+                      dma_unmap_single(uap->dmatx.chan->device->dev,
-+                                       uap->dmatx.dma, uap->dmatx.len,
-+                                       DMA_TO_DEVICE);
-                       uap->dmatx.queued = false;
-               }
-@@ -1235,8 +1231,8 @@ static void pl011_dma_shutdown(struct ua
-       if (uap->using_rx_dma) {
-               dmaengine_terminate_all(uap->dmarx.chan);
-               /* Clean up the RX DMA */
--              pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
--              pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
-+              pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a, DMA_FROM_DEVICE);
-+              pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_b, DMA_FROM_DEVICE);
-               if (uap->dmarx.poll_rate)
-                       del_timer_sync(&uap->dmarx.timer);
-               uap->using_rx_dma = false;
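
For readers who want the pattern in isolation: the patch being dropped (quoted above for 4.14 and again below for 4.19) converts the PL011 driver's DMA buffers from one-entry scatterlists to a bare dma_addr_t plus length. The sketch below is a minimal, driver-agnostic illustration of that conversion, not the pl011 code itself; the names (my_dmabuf, my_rx_buf_init, my_tx_submit) are hypothetical, and it uses the generic dma_mapping_error() check where the quoted patch compares the handle against DMA_MAPPING_ERROR directly.

/*
 * Illustrative sketch only -- hypothetical names, not the pl011 driver code.
 * Shows the single-entry-scatterlist -> dma_addr_t/len conversion pattern
 * that the dropped patch applies.
 */
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/gfp.h>

struct my_dmabuf {
        dma_addr_t      dma;    /* bus address handed to the DMA engine */
        size_t          len;    /* mapped/usable length */
        char            *buf;   /* CPU virtual address */
};

/* RX: a coherent allocation already returns the DMA handle, no sg list needed */
static int my_rx_buf_init(struct dma_chan *chan, struct my_dmabuf *db, size_t size)
{
        db->buf = dma_alloc_coherent(chan->device->dev, size, &db->dma, GFP_KERNEL);
        if (!db->buf)
                return -ENOMEM;
        db->len = size;
        return 0;
}

static void my_rx_buf_free(struct dma_chan *chan, struct my_dmabuf *db, size_t size)
{
        if (db->buf)
                dma_free_coherent(chan->device->dev, size, db->buf, db->dma);
}

/* TX: streaming-map an already-filled buffer and queue one single-segment descriptor */
static int my_tx_submit(struct dma_chan *chan, struct my_dmabuf *db, size_t count)
{
        struct dma_async_tx_descriptor *desc;

        db->len = count;
        db->dma = dma_map_single(chan->device->dev, db->buf, count, DMA_TO_DEVICE);
        if (dma_mapping_error(chan->device->dev, db->dma))
                return -EBUSY;

        desc = dmaengine_prep_slave_single(chan, db->dma, db->len, DMA_MEM_TO_DEV,
                                           DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                dma_unmap_single(chan->device->dev, db->dma, db->len, DMA_TO_DEVICE);
                return -EBUSY;
        }

        dmaengine_submit(desc);
        dma_async_issue_pending(chan);
        return 0;
}

As in the quoted patch, the RX buffers never need a map/unmap step because dma_alloc_coherent() provides both the kernel virtual address and the device-visible handle; only the TX path, which reuses a normal kernel buffer for each transfer, wraps every descriptor in a dma_map_single()/dma_unmap_single() pair.
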
diff --git a/queue-4.14/series b/queue-4.14/series
index 8d4b265d7dc96c0d76d9b5fa974ec54c4fb28373..c9c1e43b8e57d1d4fd16c7e7ab4d86b9a0d13c59 100644
--- a/queue-4.14/series
+++ b/queue-4.14/series
@@ -14,7 +14,6 @@ tracing-fix-incomplete-locking-when-disabling-buffered-events.patch
 tracing-fix-a-possible-race-when-disabling-buffered-events.patch
 packet-move-reference-count-in-packet_sock-to-atomic_long_t.patch
 parport-add-support-for-brainboxes-ix-uc-px-parallel-cards.patch
-arm-pl011-fix-dma-support.patch
 serial-sc16is7xx-address-rx-timeout-interrupt-errata.patch
 serial-8250_omap-add-earlycon-support-for-the-am654-uart-controller.patch
 kvm-s390-mm-properly-reset-no-dat.patch
diff --git a/queue-4.19/arm-pl011-fix-dma-support.patch b/queue-4.19/arm-pl011-fix-dma-support.patch
deleted file mode 100644
index f093df0..0000000
--- a/queue-4.19/arm-pl011-fix-dma-support.patch
+++ /dev/null
@@ -1,335 +0,0 @@
-From 58ac1b3799799069d53f5bf95c093f2fe8dd3cc5 Mon Sep 17 00:00:00 2001
-From: Arnd Bergmann <arnd@arndb.de>
-Date: Wed, 22 Nov 2023 18:15:03 +0100
-Subject: ARM: PL011: Fix DMA support
-
-From: Arnd Bergmann <arnd@arndb.de>
-
-commit 58ac1b3799799069d53f5bf95c093f2fe8dd3cc5 upstream.
-
-Since there is no guarantee that the memory returned by
-dma_alloc_coherent() is associated with a 'struct page', using the
-architecture specific phys_to_page() is wrong, but using
-virt_to_page() would be as well.
-
-Stop using sg lists altogether and just use the *_single() functions
-instead. This also simplifies the code a bit since the scatterlists in
-this driver always have only one entry anyway.
-
-https://lore.kernel.org/lkml/86db0fe5-930d-4cbb-bd7d-03367da38951@app.fastmail.com/
-    Use consistent names for dma buffers
-
-gc: Add a commit log from the initial thread:
-https://lore.kernel.org/lkml/86db0fe5-930d-4cbb-bd7d-03367da38951@app.fastmail.com/
-    Use consistent names for dma buffers
-
-Fixes: cb06ff102e2d7 ("ARM: PL011: Add support for Rx DMA buffer polling.")
-Signed-off-by: Arnd Bergmann <arnd@arndb.de>
-Tested-by: Gregory CLEMENT <gregory.clement@bootlin.com>
-Signed-off-by: Gregory CLEMENT <gregory.clement@bootlin.com>
-Cc: stable <stable@kernel.org>
-Link: https://lore.kernel.org/r/20231122171503.235649-1-gregory.clement@bootlin.com
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- drivers/tty/serial/amba-pl011.c |  112 +++++++++++++++++++---------------------
- 1 file changed, 54 insertions(+), 58 deletions(-)
-
---- a/drivers/tty/serial/amba-pl011.c
-+++ b/drivers/tty/serial/amba-pl011.c
-@@ -227,17 +227,18 @@ static struct vendor_data vendor_zte = {
- /* Deals with DMA transactions */
--struct pl011_sgbuf {
--      struct scatterlist sg;
--      char *buf;
-+struct pl011_dmabuf {
-+      dma_addr_t              dma;
-+      size_t                  len;
-+      char                    *buf;
- };
- struct pl011_dmarx_data {
-       struct dma_chan         *chan;
-       struct completion       complete;
-       bool                    use_buf_b;
--      struct pl011_sgbuf      sgbuf_a;
--      struct pl011_sgbuf      sgbuf_b;
-+      struct pl011_dmabuf     dbuf_a;
-+      struct pl011_dmabuf     dbuf_b;
-       dma_cookie_t            cookie;
-       bool                    running;
-       struct timer_list       timer;
-@@ -250,7 +251,8 @@ struct pl011_dmarx_data {
- struct pl011_dmatx_data {
-       struct dma_chan         *chan;
--      struct scatterlist      sg;
-+      dma_addr_t              dma;
-+      size_t                  len;
-       char                    *buf;
-       bool                    queued;
- };
-@@ -371,32 +373,24 @@ static int pl011_fifo_to_tty(struct uart
- #define PL011_DMA_BUFFER_SIZE PAGE_SIZE
--static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
-+static int pl011_dmabuf_init(struct dma_chan *chan, struct pl011_dmabuf *db,
-       enum dma_data_direction dir)
- {
--      dma_addr_t dma_addr;
--
--      sg->buf = dma_alloc_coherent(chan->device->dev,
--              PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
--      if (!sg->buf)
-+      db->buf = dma_alloc_coherent(chan->device->dev, PL011_DMA_BUFFER_SIZE,
-+                                   &db->dma, GFP_KERNEL);
-+      if (!db->buf)
-               return -ENOMEM;
--
--      sg_init_table(&sg->sg, 1);
--      sg_set_page(&sg->sg, phys_to_page(dma_addr),
--              PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
--      sg_dma_address(&sg->sg) = dma_addr;
--      sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;
-+      db->len = PL011_DMA_BUFFER_SIZE;
-       return 0;
- }
--static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
-+static void pl011_dmabuf_free(struct dma_chan *chan, struct pl011_dmabuf *db,
-       enum dma_data_direction dir)
- {
--      if (sg->buf) {
-+      if (db->buf) {
-               dma_free_coherent(chan->device->dev,
--                      PL011_DMA_BUFFER_SIZE, sg->buf,
--                      sg_dma_address(&sg->sg));
-+                                PL011_DMA_BUFFER_SIZE, db->buf, db->dma);
-       }
- }
-@@ -557,8 +551,8 @@ static void pl011_dma_tx_callback(void *
-       spin_lock_irqsave(&uap->port.lock, flags);
-       if (uap->dmatx.queued)
--              dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
--                           DMA_TO_DEVICE);
-+              dma_unmap_single(dmatx->chan->device->dev, dmatx->dma,
-+                              dmatx->len, DMA_TO_DEVICE);
-       dmacr = uap->dmacr;
-       uap->dmacr = dmacr & ~UART011_TXDMAE;
-@@ -644,18 +638,19 @@ static int pl011_dma_tx_refill(struct ua
-                       memcpy(&dmatx->buf[first], &xmit->buf[0], second);
-       }
--      dmatx->sg.length = count;
--
--      if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
-+      dmatx->len = count;
-+      dmatx->dma = dma_map_single(dma_dev->dev, dmatx->buf, count,
-+                                  DMA_TO_DEVICE);
-+      if (dmatx->dma == DMA_MAPPING_ERROR) {
-               uap->dmatx.queued = false;
-               dev_dbg(uap->port.dev, "unable to map TX DMA\n");
-               return -EBUSY;
-       }
--      desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
-+      desc = dmaengine_prep_slave_single(chan, dmatx->dma, dmatx->len, DMA_MEM_TO_DEV,
-                                            DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-       if (!desc) {
--              dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
-+              dma_unmap_single(dma_dev->dev, dmatx->dma, dmatx->len, DMA_TO_DEVICE);
-               uap->dmatx.queued = false;
-               /*
-                * If DMA cannot be used right now, we complete this
-@@ -819,8 +814,8 @@ __acquires(&uap->port.lock)
-       dmaengine_terminate_async(uap->dmatx.chan);
-       if (uap->dmatx.queued) {
--              dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
--                           DMA_TO_DEVICE);
-+              dma_unmap_single(uap->dmatx.chan->device->dev, uap->dmatx.dma,
-+                               uap->dmatx.len, DMA_TO_DEVICE);
-               uap->dmatx.queued = false;
-               uap->dmacr &= ~UART011_TXDMAE;
-               pl011_write(uap->dmacr, uap, REG_DMACR);
-@@ -834,15 +829,15 @@ static int pl011_dma_rx_trigger_dma(stru
-       struct dma_chan *rxchan = uap->dmarx.chan;
-       struct pl011_dmarx_data *dmarx = &uap->dmarx;
-       struct dma_async_tx_descriptor *desc;
--      struct pl011_sgbuf *sgbuf;
-+      struct pl011_dmabuf *dbuf;
-       if (!rxchan)
-               return -EIO;
-       /* Start the RX DMA job */
--      sgbuf = uap->dmarx.use_buf_b ?
--              &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
--      desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
-+      dbuf = uap->dmarx.use_buf_b ?
-+              &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
-+      desc = dmaengine_prep_slave_single(rxchan, dbuf->dma, dbuf->len,
-                                       DMA_DEV_TO_MEM,
-                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-       /*
-@@ -882,8 +877,8 @@ static void pl011_dma_rx_chars(struct ua
-                              bool readfifo)
- {
-       struct tty_port *port = &uap->port.state->port;
--      struct pl011_sgbuf *sgbuf = use_buf_b ?
--              &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
-+      struct pl011_dmabuf *dbuf = use_buf_b ?
-+              &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
-       int dma_count = 0;
-       u32 fifotaken = 0; /* only used for vdbg() */
-@@ -892,7 +887,7 @@ static void pl011_dma_rx_chars(struct ua
-       if (uap->dmarx.poll_rate) {
-               /* The data can be taken by polling */
--              dmataken = sgbuf->sg.length - dmarx->last_residue;
-+              dmataken = dbuf->len - dmarx->last_residue;
-               /* Recalculate the pending size */
-               if (pending >= dmataken)
-                       pending -= dmataken;
-@@ -906,7 +901,7 @@ static void pl011_dma_rx_chars(struct ua
-                * Note that tty_insert_flip_buf() tries to take as many chars
-                * as it can.
-                */
--              dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
-+              dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
-                               pending);
-               uap->port.icount.rx += dma_count;
-@@ -917,7 +912,7 @@ static void pl011_dma_rx_chars(struct ua
-       /* Reset the last_residue for Rx DMA poll */
-       if (uap->dmarx.poll_rate)
--              dmarx->last_residue = sgbuf->sg.length;
-+              dmarx->last_residue = dbuf->len;
-       /*
-        * Only continue with trying to read the FIFO if all DMA chars have
-@@ -954,8 +949,8 @@ static void pl011_dma_rx_irq(struct uart
- {
-       struct pl011_dmarx_data *dmarx = &uap->dmarx;
-       struct dma_chan *rxchan = dmarx->chan;
--      struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
--              &dmarx->sgbuf_b : &dmarx->sgbuf_a;
-+      struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
-+              &dmarx->dbuf_b : &dmarx->dbuf_a;
-       size_t pending;
-       struct dma_tx_state state;
-       enum dma_status dmastat;
-@@ -977,7 +972,7 @@ static void pl011_dma_rx_irq(struct uart
-       pl011_write(uap->dmacr, uap, REG_DMACR);
-       uap->dmarx.running = false;
--      pending = sgbuf->sg.length - state.residue;
-+      pending = dbuf->len - state.residue;
-       BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
-       /* Then we terminate the transfer - we now know our residue */
-       dmaengine_terminate_all(rxchan);
-@@ -1004,8 +999,8 @@ static void pl011_dma_rx_callback(void *
-       struct pl011_dmarx_data *dmarx = &uap->dmarx;
-       struct dma_chan *rxchan = dmarx->chan;
-       bool lastbuf = dmarx->use_buf_b;
--      struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
--              &dmarx->sgbuf_b : &dmarx->sgbuf_a;
-+      struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
-+              &dmarx->dbuf_b : &dmarx->dbuf_a;
-       size_t pending;
-       struct dma_tx_state state;
-       int ret;
-@@ -1023,7 +1018,7 @@ static void pl011_dma_rx_callback(void *
-        * the DMA irq handler. So we check the residue here.
-        */
-       rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
--      pending = sgbuf->sg.length - state.residue;
-+      pending = dbuf->len - state.residue;
-       BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
-       /* Then we terminate the transfer - we now know our residue */
-       dmaengine_terminate_all(rxchan);
-@@ -1075,16 +1070,16 @@ static void pl011_dma_rx_poll(struct tim
-       unsigned long flags = 0;
-       unsigned int dmataken = 0;
-       unsigned int size = 0;
--      struct pl011_sgbuf *sgbuf;
-+      struct pl011_dmabuf *dbuf;
-       int dma_count;
-       struct dma_tx_state state;
--      sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
-+      dbuf = dmarx->use_buf_b ? &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
-       rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
-       if (likely(state.residue < dmarx->last_residue)) {
--              dmataken = sgbuf->sg.length - dmarx->last_residue;
-+              dmataken = dbuf->len - dmarx->last_residue;
-               size = dmarx->last_residue - state.residue;
--              dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
-+              dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
-                               size);
-               if (dma_count == size)
-                       dmarx->last_residue =  state.residue;
-@@ -1131,7 +1126,7 @@ static void pl011_dma_startup(struct uar
-               return;
-       }
--      sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);
-+      uap->dmatx.len = PL011_DMA_BUFFER_SIZE;
-       /* The DMA buffer is now the FIFO the TTY subsystem can use */
-       uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
-@@ -1141,7 +1136,7 @@ static void pl011_dma_startup(struct uar
-               goto skip_rx;
-       /* Allocate and map DMA RX buffers */
--      ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
-+      ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_a,
-                              DMA_FROM_DEVICE);
-       if (ret) {
-               dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
-@@ -1149,12 +1144,12 @@ static void pl011_dma_startup(struct uar
-               goto skip_rx;
-       }
--      ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
-+      ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_b,
-                              DMA_FROM_DEVICE);
-       if (ret) {
-               dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
-                       "RX buffer B", ret);
--              pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
-+              pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a,
-                                DMA_FROM_DEVICE);
-               goto skip_rx;
-       }
-@@ -1208,8 +1203,9 @@ static void pl011_dma_shutdown(struct ua
-               /* In theory, this should already be done by pl011_dma_flush_buffer */
-               dmaengine_terminate_all(uap->dmatx.chan);
-               if (uap->dmatx.queued) {
--                      dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
--                                   DMA_TO_DEVICE);
-+                      dma_unmap_single(uap->dmatx.chan->device->dev,
-+                                       uap->dmatx.dma, uap->dmatx.len,
-+                                       DMA_TO_DEVICE);
-                       uap->dmatx.queued = false;
-               }
-@@ -1220,8 +1216,8 @@ static void pl011_dma_shutdown(struct ua
-       if (uap->using_rx_dma) {
-               dmaengine_terminate_all(uap->dmarx.chan);
-               /* Clean up the RX DMA */
--              pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
--              pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
-+              pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a, DMA_FROM_DEVICE);
-+              pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_b, DMA_FROM_DEVICE);
-               if (uap->dmarx.poll_rate)
-                       del_timer_sync(&uap->dmarx.timer);
-               uap->using_rx_dma = false;
diff --git a/queue-4.19/series b/queue-4.19/series
index 63d7111d408b30e2076f8602241fa67fabb0e03e..23a67ab28339f9e2831379abc611a37d888c5058 100644
--- a/queue-4.19/series
+++ b/queue-4.19/series
@@ -38,7 +38,6 @@ gpiolib-sysfs-fix-error-handling-on-failed-export.patch
 usb-gadget-f_hid-fix-report-descriptor-allocation.patch
 parport-add-support-for-brainboxes-ix-uc-px-parallel-cards.patch
 usb-typec-class-fix-typec_altmode_put_partner-to-put-plugs.patch
-arm-pl011-fix-dma-support.patch
 serial-sc16is7xx-address-rx-timeout-interrupt-errata.patch
 serial-8250_omap-add-earlycon-support-for-the-am654-uart-controller.patch
 x86-cpu-amd-check-vendor-in-the-amd-microcode-callback.patch