--- /dev/null
+From stable-bounces@linux.kernel.org Mon Jul 14 00:25:34 2008
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Mon, 14 Jul 2008 14:46:07 +0800
+Subject: crypto: chainiv - Invoke completion function
+To: stable@kernel.org
+Message-ID: <20080714064607.GA26660@gondor.apana.org.au>
+Content-Disposition: inline
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+Upstream commit: 872ac8743cb400192a9fce4ba2d3ffd7bb309685
+
+When chainiv postpones requests it never calls their completion functions.
+This causes symptoms such as memory leaks when IPsec is in use.
+
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ crypto/chainiv.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/crypto/chainiv.c
++++ b/crypto/chainiv.c
+@@ -117,6 +117,7 @@ static int chainiv_init(struct crypto_tf
+ static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
+ {
+ int queued;
++ int err = ctx->err;
+
+ if (!ctx->queue.qlen) {
+ smp_mb__before_clear_bit();
+@@ -131,7 +132,7 @@ static int async_chainiv_schedule_work(s
+ BUG_ON(!queued);
+
+ out:
+- return ctx->err;
++ return err;
+ }
+
+ static int async_chainiv_postpone_request(struct skcipher_givcrypt_request *req)
+@@ -227,6 +228,7 @@ static void async_chainiv_do_postponed(s
+ postponed);
+ struct skcipher_givcrypt_request *req;
+ struct ablkcipher_request *subreq;
++ int err;
+
+ /* Only handle one request at a time to avoid hogging keventd. */
+ spin_lock_bh(&ctx->lock);
+@@ -241,7 +243,11 @@ static void async_chainiv_do_postponed(s
+ subreq = skcipher_givcrypt_reqctx(req);
+ subreq->base.flags |= CRYPTO_TFM_REQ_MAY_SLEEP;
+
+- async_chainiv_givencrypt_tail(req);
++ err = async_chainiv_givencrypt_tail(req);
++
++ local_bh_disable();
++ skcipher_givcrypt_complete(req, err);
++ local_bh_enable();
+ }
+
+ static int async_chainiv_init(struct crypto_tfm *tfm)
--- /dev/null
+From stable-bounces@linux.kernel.org Thu Jul 3 17:53:05 2008
+From: Steven Rostedt <rostedt@goodmis.org>
+Date: Thu, 3 Jul 2008 14:31:26 -0400 (EDT)
+Subject: hrtimer: prevent migration for raising softirq
+Cc: akpm@osdl.org, Peter Zijlstra <peterz@infradead.org>, Thomas Gleixner <tglx@linutronix.de>, Linus Torvalds <torvalds@linux-foundation.org>, stable@kernel.org
+Message-ID: <Pine.LNX.4.58.0807031428440.7798@gandalf.stny.rr.com>
+
+From: Steven Rostedt <rostedt@goodmis.org>
+
+commit ee3ece830f6db9837f7ac67008f532a8c1e755f4 upstream.
+
+Due to a possible deadlock, the waking of the softirq was pushed outside
+of the hrtimer base locks. See commit 0c96c5979a522c3323c30a078a70120e29b5bdbc
+
+Unfortunately this allows the task to migrate after setting up the softirq
+and raising it. Since softirqs run a queue that is per-cpu we may raise the
+softirq on the wrong CPU and this will keep the queued softirq task from
+running.
+
+To solve this issue, this patch disables preemption around the releasing
+of the hrtimer lock and raising of the softirq.
+
+Signed-off-by: Steven Rostedt <srostedt@redhat.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/hrtimer.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/kernel/hrtimer.c
++++ b/kernel/hrtimer.c
+@@ -896,10 +896,18 @@ hrtimer_start(struct hrtimer *timer, kti
+ */
+ raise = timer->state == HRTIMER_STATE_PENDING;
+
++ /*
++ * We use preempt_disable to prevent this task from migrating after
++ * setting up the softirq and raising it. Otherwise, if we migrate
++ * we will raise the softirq on the wrong CPU.
++ */
++ preempt_disable();
++
+ unlock_hrtimer_base(timer, &flags);
+
+ if (raise)
+ hrtimer_raise_softirq();
++ preempt_enable();
+
+ return ret;
+ }
--- /dev/null
+From stable-bounces@linux.kernel.org Fri Jul 4 04:31:50 2008
+From: Pierre Ossman <drzeus-list@drzeus.cx>
+Date: Fri, 4 Jul 2008 12:51:20 +0200
+Subject: mmc: don't use DMA on newer ENE controllers
+To: Linus Torvalds <torvalds@linux-foundation.org>
+Message-ID: <20080704125120.6ccaadf5@mjolnir.drzeus.cx>
+
+From: Pierre Ossman <drzeus@drzeus.cx>
+
+commit bf5b1935d8e42b36a34645788eb261461fe07f2e upstream.
+
+Even the newer ENE controllers have bugs in their DMA engine that make
+it too dangerous to use. Disable it until someone has figured out under
+which conditions it corrupts data.
+
+This has caused problems at least once, and can be found as bug report
+10925 in the kernel bugzilla.
+
+Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/mmc/host/sdhci.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -109,7 +109,8 @@ static const struct pci_device_id pci_id
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE |
+- SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS,
++ SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS |
++ SDHCI_QUIRK_BROKEN_DMA,
+ },
+
+ {
+@@ -118,7 +119,8 @@ static const struct pci_device_id pci_id
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE |
+- SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS,
++ SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS |
++ SDHCI_QUIRK_BROKEN_DMA,
+ },
+
+ {
--- /dev/null
+From ba0fc709e197415aadd46b9ec208dc4abaa21edd Mon Sep 17 00:00:00 2001
+From: Vitaly Bordug <vitb@kernel.crashing.org>
+Date: Wed, 9 Jul 2008 13:13:38 +1000
+Subject: powerpc: Add missing reference to coherent_dma_mask
+
+From: Vitaly Bordug <vitb@kernel.crashing.org>
+
+commit ba0fc709e197415aadd46b9ec208dc4abaa21edd upstream
+
+There is dma_mask in of_device upon of_platform_device_create()
+but we don't actually set coherent_dma_mask. This may cause weird
+behavior of USB subsystem using of_device USB host drivers.
+
+Signed-off-by: Vitaly Bordug <vitb@kernel.crashing.org>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/powerpc/kernel/of_platform.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/powerpc/kernel/of_platform.c
++++ b/arch/powerpc/kernel/of_platform.c
+@@ -76,6 +76,8 @@ struct of_device* of_platform_device_cre
+ return NULL;
+
+ dev->dma_mask = 0xffffffffUL;
++ dev->dev.coherent_dma_mask = DMA_32BIT_MASK;
++
+ dev->dev.bus = &of_platform_bus_type;
+
+ /* We do not fill the DMA ops for platform devices by default.
--- /dev/null
+From stable-bounces@linux.kernel.org Sat Jul 5 16:15:53 2008
+From: Pierre Ossman <drzeus-list@drzeus.cx>
+Date: Sun, 6 Jul 2008 01:15:34 +0200
+Subject: pxamci: fix byte aligned DMA transfers
+To: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Philipp Zabel <philipp.zabel@gmail.com>, Stable branch <stable@kernel.org>
+Message-ID: <20080706011534.6dc71f5a@mjolnir.drzeus.cx>
+
+From: Philipp Zabel <philipp.zabel@gmail.com>
+
+commit 97f8571e663c808ad2d01a396627235167291556 upstream
+
+The pxa27x DMA controller defaults to 64-bit alignment. This caused
+the SCR reads to fail (and, depending on card type, error out) when
+card->raw_scr was not aligned on a 8-byte boundary.
+
+For performance reasons all scatter-gather addresses passed to
+pxamci_request should be aligned on 8-byte boundaries, but if
+this can't be guaranteed, byte aligned DMA transfers have to be
+enabled in the controller to get correct behaviour.
+
+Signed-off-by: Philipp Zabel <philipp.zabel@gmail.com>
+Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/mmc/host/pxamci.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/drivers/mmc/host/pxamci.c
++++ b/drivers/mmc/host/pxamci.c
+@@ -114,6 +114,7 @@ static void pxamci_setup_data(struct pxa
+ unsigned int nob = data->blocks;
+ unsigned long long clks;
+ unsigned int timeout;
++ bool dalgn = 0;
+ u32 dcmd;
+ int i;
+
+@@ -152,6 +153,9 @@ static void pxamci_setup_data(struct pxa
+ host->sg_cpu[i].dcmd = dcmd | length;
+ if (length & 31 && !(data->flags & MMC_DATA_READ))
+ host->sg_cpu[i].dcmd |= DCMD_ENDIRQEN;
++ /* Not aligned to 8-byte boundary? */
++ if (sg_dma_address(&data->sg[i]) & 0x7)
++ dalgn = 1;
+ if (data->flags & MMC_DATA_READ) {
+ host->sg_cpu[i].dsadr = host->res->start + MMC_RXFIFO;
+ host->sg_cpu[i].dtadr = sg_dma_address(&data->sg[i]);
+@@ -165,6 +169,15 @@ static void pxamci_setup_data(struct pxa
+ host->sg_cpu[host->dma_len - 1].ddadr = DDADR_STOP;
+ wmb();
+
++ /*
++ * The PXA27x DMA controller encounters overhead when working with
++ * unaligned (to 8-byte boundaries) data, so switch on byte alignment
++ * mode only if we have unaligned data.
++ */
++ if (dalgn)
++ DALGN |= (1 << host->dma);
++ else
++ DALGN &= ~(1 << host->dma);
+ DDADR(host->dma) = host->sg_dma;
+ DCSR(host->dma) = DCSR_RUN;
+ }
drivers-isdn-i4l-isdn_common.c-fix-small-resource-leak.patch
drivers-char-pcmcia-ipwireless-hardware.c-fix-resource-leak.patch
scsi-mptspi-fix-oops-in-mptspi_dv_renegotiate_work.patch
+crypto-chainiv-invoke-completion-function.patch
+powerpc-add-missing-reference-to-coherent_dma_mask.patch
+pxamci-fix-byte-aligned-dma-transfers.patch
+mmc-don-t-use-dma-on-newer-ene-controllers.patch
+hrtimer-prevent-migration-for-raising-softirq.patch