git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
more .27 patches
author Greg Kroah-Hartman <gregkh@suse.de>
Fri, 9 Jan 2009 00:27:54 +0000 (16:27 -0800)
committer Greg Kroah-Hartman <gregkh@suse.de>
Fri, 9 Jan 2009 00:27:54 +0000 (16:27 -0800)
queue-2.6.27/async_xor-dma_map-destination-dma_bidirectional.patch [new file with mode: 0644]
queue-2.6.27/dmaengine-protect-id-from-concurrent-registrations.patch [new file with mode: 0644]
queue-2.6.27/ioat-wait-for-self-test-completion.patch [new file with mode: 0644]
queue-2.6.27/series

diff --git a/queue-2.6.27/async_xor-dma_map-destination-dma_bidirectional.patch b/queue-2.6.27/async_xor-dma_map-destination-dma_bidirectional.patch
new file mode 100644 (file)
index 0000000..82f687b
--- /dev/null
@@ -0,0 +1,115 @@
+From a06d568f7c5e40e34ea64881842deb8f4382babf Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Mon, 8 Dec 2008 13:46:00 -0700
+Subject: async_xor: dma_map destination DMA_BIDIRECTIONAL
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit a06d568f7c5e40e34ea64881842deb8f4382babf upstream.
+
+Mapping the destination multiple times is a misuse of the dma-api.
+Since the destination may be reused as a source, ensure that it is only
+mapped once and that it is mapped bidirectionally.  This appears to add
+ugliness on the unmap side in that it always reads back the destination
+address from the descriptor, but gcc can determine that dma_unmap is a
+nop and not emit the code that calculates its arguments.
+
+Cc: Saeed Bishara <saeed@marvell.com>
+Acked-by: Yuri Tikhonov <yur@emcraft.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ crypto/async_tx/async_xor.c |   11 +++++++++--
+ drivers/dma/iop-adma.c      |   16 +++++++++++++---
+ drivers/dma/mv_xor.c        |   15 ++++++++++++---
+ 3 files changed, 34 insertions(+), 8 deletions(-)
+
+--- a/crypto/async_tx/async_xor.c
++++ b/crypto/async_tx/async_xor.c
+@@ -53,10 +53,17 @@ do_async_xor(struct dma_chan *chan, stru
+       int xor_src_cnt;
+       dma_addr_t dma_dest;
+-      dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_FROM_DEVICE);
+-      for (i = 0; i < src_cnt; i++)
++      /* map the dest bidirectional in case it is re-used as a source */
++      dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL);
++      for (i = 0; i < src_cnt; i++) {
++              /* only map the dest once */
++              if (unlikely(src_list[i] == dest)) {
++                      dma_src[i] = dma_dest;
++                      continue;
++              }
+               dma_src[i] = dma_map_page(dma->dev, src_list[i], offset,
+                                         len, DMA_TO_DEVICE);
++      }
+       while (src_cnt) {
+               async_flags = flags;
+--- a/drivers/dma/iop-adma.c
++++ b/drivers/dma/iop-adma.c
+@@ -85,18 +85,28 @@ iop_adma_run_tx_complete_actions(struct 
+                       enum dma_ctrl_flags flags = desc->async_tx.flags;
+                       u32 src_cnt;
+                       dma_addr_t addr;
++                      dma_addr_t dest;
++                      src_cnt = unmap->unmap_src_cnt;
++                      dest = iop_desc_get_dest_addr(unmap, iop_chan);
+                       if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+-                              addr = iop_desc_get_dest_addr(unmap, iop_chan);
+-                              dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
++                              enum dma_data_direction dir;
++
++                              if (src_cnt > 1) /* is xor? */
++                                      dir = DMA_BIDIRECTIONAL;
++                              else
++                                      dir = DMA_FROM_DEVICE;
++
++                              dma_unmap_page(dev, dest, len, dir);
+                       }
+                       if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+-                              src_cnt = unmap->unmap_src_cnt;
+                               while (src_cnt--) {
+                                       addr = iop_desc_get_src_addr(unmap,
+                                                                    iop_chan,
+                                                                    src_cnt);
++                                      if (addr == dest)
++                                              continue;
+                                       dma_unmap_page(dev, addr, len,
+                                                      DMA_TO_DEVICE);
+                               }
+--- a/drivers/dma/mv_xor.c
++++ b/drivers/dma/mv_xor.c
+@@ -311,17 +311,26 @@ mv_xor_run_tx_complete_actions(struct mv
+                       enum dma_ctrl_flags flags = desc->async_tx.flags;
+                       u32 src_cnt;
+                       dma_addr_t addr;
++                      dma_addr_t dest;
++                      src_cnt = unmap->unmap_src_cnt;
++                      dest = mv_desc_get_dest_addr(unmap);
+                       if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+-                              addr = mv_desc_get_dest_addr(unmap);
+-                              dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
++                              enum dma_data_direction dir;
++
++                              if (src_cnt > 1) /* is xor ? */
++                                      dir = DMA_BIDIRECTIONAL;
++                              else
++                                      dir = DMA_FROM_DEVICE;
++                              dma_unmap_page(dev, dest, len, dir);
+                       }
+                       if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+-                              src_cnt = unmap->unmap_src_cnt;
+                               while (src_cnt--) {
+                                       addr = mv_desc_get_src_addr(unmap,
+                                                                   src_cnt);
++                                      if (addr == dest)
++                                              continue;
+                                       dma_unmap_page(dev, addr, len,
+                                                      DMA_TO_DEVICE);
+                               }
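
To make the mapping discipline in the async_xor hunk easier to follow outside the diff, here is a minimal standalone sketch, not the upstream code: the destination page is mapped exactly once, bidirectionally, and any source slot that aliases the destination reuses that handle instead of being mapped again. The helper name map_xor_pages() and its exact signature are illustrative assumptions.

/* Sketch only: mirrors the mapping rule enforced by the patch above. */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/mm.h>

static int map_xor_pages(struct device *dev, struct page *dest,
			 struct page **src_list, int src_cnt,
			 unsigned int offset, size_t len,
			 dma_addr_t *dma_dest, dma_addr_t *dma_src)
{
	int i;

	/* dest may be read back as an XOR source, so map it both ways */
	*dma_dest = dma_map_page(dev, dest, offset, len, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *dma_dest))
		return -ENOMEM;

	for (i = 0; i < src_cnt; i++) {
		/* never map the destination page a second time */
		if (src_list[i] == dest) {
			dma_src[i] = *dma_dest;
			continue;
		}
		dma_src[i] = dma_map_page(dev, src_list[i], offset, len,
					  DMA_TO_DEVICE);
	}
	return 0;
}

The iop-adma and mv_xor hunks apply the same rule in reverse on completion: the destination is unmapped once (bidirectionally when the descriptor was an xor), and any source address equal to the destination is skipped.
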
diff --git a/queue-2.6.27/dmaengine-protect-id-from-concurrent-registrations.patch b/queue-2.6.27/dmaengine-protect-id-from-concurrent-registrations.patch
new file mode 100644 (file)
index 0000000..8a43c62
--- /dev/null
@@ -0,0 +1,32 @@
+From b0b42b16ff2b90f17bc1a4308366c9beba4b276e Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Wed, 3 Dec 2008 17:17:07 -0700
+Subject: dmaengine: protect 'id' from concurrent registrations
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit b0b42b16ff2b90f17bc1a4308366c9beba4b276e upstream.
+
+There is a possibility to have two devices registered with the same id.
+
+Acked-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/dma/dmaengine.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/dma/dmaengine.c
++++ b/drivers/dma/dmaengine.c
+@@ -388,7 +388,10 @@ int dma_async_device_register(struct dma
+       init_completion(&device->done);
+       kref_init(&device->refcount);
++
++      mutex_lock(&dma_list_mutex);
+       device->dev_id = id++;
++      mutex_unlock(&dma_list_mutex);
+       /* represent channels in sysfs. Probably want devs too */
+       list_for_each_entry(chan, &device->channels, device_node) {
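
The race being closed here is easier to see in isolation. Below is a minimal sketch, not the dmaengine code itself: 'id++' is a non-atomic read-modify-write on a shared counter, so two concurrent registrations can hand out the same dev_id; serializing the increment under dma_list_mutex, which the file already uses, makes the allocation unique. The wrapper name assign_dev_id() is an illustrative assumption.

#include <linux/dmaengine.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(dma_list_mutex);	/* in-tree, this already exists in dmaengine.c */
static int id;				/* next device id to hand out */

static void assign_dev_id(struct dma_device *device)
{
	/* without the lock, two registrations can read the same 'id' value */
	mutex_lock(&dma_list_mutex);
	device->dev_id = id++;
	mutex_unlock(&dma_list_mutex);
}
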
diff --git a/queue-2.6.27/ioat-wait-for-self-test-completion.patch b/queue-2.6.27/ioat-wait-for-self-test-completion.patch
new file mode 100644 (file)
index 0000000..f52dc1e
--- /dev/null
@@ -0,0 +1,47 @@
+From 532d3b1f86f41834a25373e3ded981d68e4ce17f Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Wed, 3 Dec 2008 17:16:55 -0700
+Subject: ioat: wait for self-test completion
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit 532d3b1f86f41834a25373e3ded981d68e4ce17f upstream.
+
+As part of the ioat_dma self-test it performs a printk from a completion
+callback.  Depending on the system console configuration this output can
+take longer than a millisecond causing the self-test to fail.  Introduce a
+completion with a generous timeout to mitigate this failure.
+
+Acked-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/dma/ioat_dma.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/dma/ioat_dma.c
++++ b/drivers/dma/ioat_dma.c
+@@ -1337,10 +1337,12 @@ static void ioat_dma_start_null_desc(str
+  */
+ #define IOAT_TEST_SIZE 2000
++DECLARE_COMPLETION(test_completion);
+ static void ioat_dma_test_callback(void *dma_async_param)
+ {
+       printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n",
+               dma_async_param);
++      complete(&test_completion);
+ }
+ /**
+@@ -1406,7 +1408,8 @@ static int ioat_dma_self_test(struct ioa
+               goto free_resources;
+       }
+       device->common.device_issue_pending(dma_chan);
+-      msleep(1);
++
++      wait_for_completion_timeout(&test_completion, msecs_to_jiffies(3000));
+       if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
+                                       != DMA_SUCCESS) {
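
The synchronization change is the whole point of the ioat patch, so here is the pattern on its own as a hedged sketch rather than the driver code: the self-test callback signals a completion, and the caller blocks on it with a generous timeout instead of sleeping a fixed millisecond and hoping the console printk has finished. Function names other than the completion API itself are illustrative.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static DECLARE_COMPLETION(test_completion);

/* completion callback: may run well over 1 ms after issue on a slow console */
static void test_callback(void *dma_async_param)
{
	complete(&test_completion);
}

static int wait_for_selftest(void)
{
	/* 3 s is generous; a 0 return means the timeout expired first */
	if (!wait_for_completion_timeout(&test_completion,
					 msecs_to_jiffies(3000)))
		return -ETIMEDOUT;
	return 0;
}
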
diff --git a/queue-2.6.27/series b/queue-2.6.27/series
index 54d9f556fb3ad0bc1a298e98743aaf977a682d73..000bb5db3886178457a23e2542404618d9121fc5 100644 (file)
@@ -4,3 +4,6 @@ usb-storage-unusual_devs.h-nokia-3109c-addition.patch
 usb-unusual-devs-patch-for-nokia-3500c.patch
 powerpc-fix-corruption-error-in-rh_alloc_fixed.patch
 iwlagn-downgrade-bug_on-in-interrupt.patch
+async_xor-dma_map-destination-dma_bidirectional.patch
+dmaengine-protect-id-from-concurrent-registrations.patch
+ioat-wait-for-self-test-completion.patch