git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
more .29 patches
author Greg Kroah-Hartman <gregkh@suse.de>
Wed, 29 Apr 2009 20:43:55 +0000 (13:43 -0700)
committer Greg Kroah-Hartman <gregkh@suse.de>
Wed, 29 Apr 2009 20:43:55 +0000 (13:43 -0700)
queue-2.6.29/b44-use-kernel-dma-addresses-for-the-kernel-dma-api.patch [new file with mode: 0644]
queue-2.6.29/block-include-empty-disks-in-proc-diskstats.patch [new file with mode: 0644]
queue-2.6.29/crypto-ixp4xx-fix-handling-of-chained-sg-buffers.patch [new file with mode: 0644]
queue-2.6.29/exit_notify-kill-the-wrong-capable-check.patch [new file with mode: 0644]
queue-2.6.29/pci-fix-incorrect-mask-of-pm-no_soft_reset-bit.patch [new file with mode: 0644]
queue-2.6.29/series

diff --git a/queue-2.6.29/b44-use-kernel-dma-addresses-for-the-kernel-dma-api.patch b/queue-2.6.29/b44-use-kernel-dma-addresses-for-the-kernel-dma-api.patch
new file mode 100644 (file)
index 0000000..0aceff2
--- /dev/null
@@ -0,0 +1,34 @@
+From 37efa239901493694a48f1d6f59f8de17c2c4509 Mon Sep 17 00:00:00 2001
+From: Michael Buesch <mb@bu3sch.de>
+Date: Mon, 6 Apr 2009 09:52:27 +0000
+Subject: b44: Use kernel DMA addresses for the kernel DMA API
+
+From: Michael Buesch <mb@bu3sch.de>
+
+commit 37efa239901493694a48f1d6f59f8de17c2c4509 upstream.
+
+We must not use the device DMA addresses for the kernel DMA API, because
+device DMA addresses have an additional offset added for the SSB translation.
+
+Use the original dma_addr_t for the sync operation.
+
+Cc: stable@kernel.org
+Signed-off-by: Michael Buesch <mb@bu3sch.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/b44.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/b44.c
++++ b/drivers/net/b44.c
+@@ -750,7 +750,7 @@ static void b44_recycle_rx(struct b44 *b
+                                            dest_idx * sizeof(dest_desc),
+                                            DMA_BIDIRECTIONAL);
+-      ssb_dma_sync_single_for_device(bp->sdev, le32_to_cpu(src_desc->addr),
++      ssb_dma_sync_single_for_device(bp->sdev, dest_map->mapping,
+                                      RX_PKT_BUF_SZ,
+                                      DMA_FROM_DEVICE);
+ }
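
For context on the b44 fix above: the dma_addr_t returned by dma_map_single() is the handle the streaming-DMA sync and unmap calls expect, while the value written into device descriptors may carry an extra bus offset (here the SSB translation) and must not be passed back into the DMA API. Below is a minimal sketch of that rule, written against the generic DMA API rather than the driver's ssb_* wrappers; the structure and function names are illustrative and not taken from b44.c.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct rx_buf {
	void		*data;		/* CPU virtual address of the buffer */
	dma_addr_t	mapping;	/* handle kept for later sync/unmap */
};

static int rx_buf_map(struct device *dev, struct rx_buf *b, size_t len)
{
	b->mapping = dma_map_single(dev, b->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, b->mapping))
		return -ENOMEM;
	/*
	 * A bus layer (SSB in the b44 case) may add a translation offset
	 * before this address is written into a hardware descriptor; that
	 * translated value is meaningful to the device only.
	 */
	return 0;
}

static void rx_buf_recycle(struct device *dev, struct rx_buf *b, size_t len)
{
	/* sync with the original dma_addr_t, not the descriptor contents */
	dma_sync_single_for_device(dev, b->mapping, len, DMA_FROM_DEVICE);
}
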
diff --git a/queue-2.6.29/block-include-empty-disks-in-proc-diskstats.patch b/queue-2.6.29/block-include-empty-disks-in-proc-diskstats.patch
new file mode 100644 (file)
index 0000000..08983cc
--- /dev/null
@@ -0,0 +1,81 @@
+From 71982a409f12c50d011325a4471aa20666bb908d Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Fri, 17 Apr 2009 08:34:48 +0200
+Subject: block: include empty disks in /proc/diskstats
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 71982a409f12c50d011325a4471aa20666bb908d upstream.
+
+/proc/diskstats used to show stats for all disks whether they're
+zero-sized or not and their non-zero partitions.  Commit
+074a7aca7afa6f230104e8e65eba3420263714a5 accidentally changed the
+behavior such that it doesn't print out zero sized disks.  This patch
+adds a DISK_PITER_INCL_EMPTY_PART0 flag to the partition iterator and
+uses it in diskstats_show() so that an empty part0 is still shown in
+/proc/diskstats.
+
+Reported and bisected by Daniel Collins.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reported-by: Daniel Collins <solemnwarning@solemnwarning.no-ip.org>
+Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ block/genhd.c         |   12 ++++++++----
+ include/linux/genhd.h |    1 +
+ 2 files changed, 9 insertions(+), 4 deletions(-)
+
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -98,7 +98,7 @@ void disk_part_iter_init(struct disk_par
+       if (flags & DISK_PITER_REVERSE)
+               piter->idx = ptbl->len - 1;
+-      else if (flags & DISK_PITER_INCL_PART0)
++      else if (flags & (DISK_PITER_INCL_PART0 | DISK_PITER_INCL_EMPTY_PART0))
+               piter->idx = 0;
+       else
+               piter->idx = 1;
+@@ -134,7 +134,8 @@ struct hd_struct *disk_part_iter_next(st
+       /* determine iteration parameters */
+       if (piter->flags & DISK_PITER_REVERSE) {
+               inc = -1;
+-              if (piter->flags & DISK_PITER_INCL_PART0)
++              if (piter->flags & (DISK_PITER_INCL_PART0 |
++                                  DISK_PITER_INCL_EMPTY_PART0))
+                       end = -1;
+               else
+                       end = 0;
+@@ -150,7 +151,10 @@ struct hd_struct *disk_part_iter_next(st
+               part = rcu_dereference(ptbl->part[piter->idx]);
+               if (!part)
+                       continue;
+-              if (!(piter->flags & DISK_PITER_INCL_EMPTY) && !part->nr_sects)
++              if (!part->nr_sects &&
++                  !(piter->flags & DISK_PITER_INCL_EMPTY) &&
++                  !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&
++                    piter->idx == 0))
+                       continue;
+               get_device(part_to_dev(part));
+@@ -1011,7 +1015,7 @@ static int diskstats_show(struct seq_fil
+                               "\n\n");
+       */
+  
+-      disk_part_iter_init(&piter, gp, DISK_PITER_INCL_PART0);
++      disk_part_iter_init(&piter, gp, DISK_PITER_INCL_EMPTY_PART0);
+       while ((hd = disk_part_iter_next(&piter))) {
+               cpu = part_stat_lock();
+               part_round_stats(cpu, hd);
+--- a/include/linux/genhd.h
++++ b/include/linux/genhd.h
+@@ -214,6 +214,7 @@ static inline void disk_put_part(struct 
+ #define DISK_PITER_REVERSE    (1 << 0) /* iterate in the reverse direction */
+ #define DISK_PITER_INCL_EMPTY (1 << 1) /* include 0-sized parts */
+ #define DISK_PITER_INCL_PART0 (1 << 2) /* include partition 0 */
++#define DISK_PITER_INCL_EMPTY_PART0 (1 << 3) /* include empty partition 0 */
+ struct disk_part_iter {
+       struct gendisk          *disk;
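
As a usage note for the flag added above: a reader such as diskstats_show() now passes DISK_PITER_INCL_EMPTY_PART0, which makes the iterator visit part0 (the whole-disk entry) even when it has zero sectors, while other empty partitions are still skipped unless DISK_PITER_INCL_EMPTY is also set. A hedged sketch of such a caller follows; walk_disk() is a made-up name, the real in-tree user is diskstats_show() in block/genhd.c.

#include <linux/genhd.h>
#include <linux/kernel.h>

static void walk_disk(struct gendisk *gp)
{
	struct disk_part_iter piter;
	struct hd_struct *hd;

	/* with this flag, part0 is returned even when its nr_sects is 0 */
	disk_part_iter_init(&piter, gp, DISK_PITER_INCL_EMPTY_PART0);
	while ((hd = disk_part_iter_next(&piter)))
		printk(KERN_INFO "%s: part%d %llu sectors\n", gp->disk_name,
		       hd->partno, (unsigned long long)hd->nr_sects);
	disk_part_iter_exit(&piter);
}
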
diff --git a/queue-2.6.29/crypto-ixp4xx-fix-handling-of-chained-sg-buffers.patch b/queue-2.6.29/crypto-ixp4xx-fix-handling-of-chained-sg-buffers.patch
new file mode 100644 (file)
index 0000000..a9d5bd6
--- /dev/null
@@ -0,0 +1,390 @@
+From 0d44dc59b2b434b29aafeae581d06f81efac7c83 Mon Sep 17 00:00:00 2001
+From: Christian Hohnstaedt <chohnstaedt@innominate.com>
+Date: Fri, 27 Mar 2009 15:09:05 +0800
+Subject: crypto: ixp4xx - Fix handling of chained sg buffers
+
+From: Christian Hohnstaedt <chohnstaedt@innominate.com>
+
+commit 0d44dc59b2b434b29aafeae581d06f81efac7c83 upstream.
+
+ - keep dma functions away from chained scatterlists.
+   Use the existing scatterlist iteration inside the driver
+   to call dma_map_single() for each chunk and avoid dma_map_sg().
+
+Signed-off-by: Christian Hohnstaedt <chohnstaedt@innominate.com>
+Tested-By:  Karl Hiramoto <karl@hiramoto.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/crypto/ixp4xx_crypto.c |  184 ++++++++++++++---------------------------
+ 1 file changed, 64 insertions(+), 120 deletions(-)
+
+--- a/drivers/crypto/ixp4xx_crypto.c
++++ b/drivers/crypto/ixp4xx_crypto.c
+@@ -101,6 +101,7 @@ struct buffer_desc {
+       u32 phys_addr;
+       u32 __reserved[4];
+       struct buffer_desc *next;
++      enum dma_data_direction dir;
+ };
+ struct crypt_ctl {
+@@ -132,14 +133,10 @@ struct crypt_ctl {
+ struct ablk_ctx {
+       struct buffer_desc *src;
+       struct buffer_desc *dst;
+-      unsigned src_nents;
+-      unsigned dst_nents;
+ };
+ struct aead_ctx {
+       struct buffer_desc *buffer;
+-      unsigned short assoc_nents;
+-      unsigned short src_nents;
+       struct scatterlist ivlist;
+       /* used when the hmac is not on one sg entry */
+       u8 *hmac_virt;
+@@ -312,7 +309,7 @@ static struct crypt_ctl *get_crypt_desc_
+       }
+ }
+-static void free_buf_chain(struct buffer_desc *buf, u32 phys)
++static void free_buf_chain(struct device *dev, struct buffer_desc *buf,u32 phys)
+ {
+       while (buf) {
+               struct buffer_desc *buf1;
+@@ -320,6 +317,7 @@ static void free_buf_chain(struct buffer
+               buf1 = buf->next;
+               phys1 = buf->phys_next;
++              dma_unmap_single(dev, buf->phys_next, buf->buf_len, buf->dir);
+               dma_pool_free(buffer_pool, buf, phys);
+               buf = buf1;
+               phys = phys1;
+@@ -348,7 +346,6 @@ static void one_packet(dma_addr_t phys)
+       struct crypt_ctl *crypt;
+       struct ixp_ctx *ctx;
+       int failed;
+-      enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
+       failed = phys & 0x1 ? -EBADMSG : 0;
+       phys &= ~0x3;
+@@ -358,13 +355,8 @@ static void one_packet(dma_addr_t phys)
+       case CTL_FLAG_PERFORM_AEAD: {
+               struct aead_request *req = crypt->data.aead_req;
+               struct aead_ctx *req_ctx = aead_request_ctx(req);
+-              dma_unmap_sg(dev, req->assoc, req_ctx->assoc_nents,
+-                              DMA_TO_DEVICE);
+-              dma_unmap_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
+-              dma_unmap_sg(dev, req->src, req_ctx->src_nents,
+-                              DMA_BIDIRECTIONAL);
+-              free_buf_chain(req_ctx->buffer, crypt->src_buf);
++              free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
+               if (req_ctx->hmac_virt) {
+                       finish_scattered_hmac(crypt);
+               }
+@@ -374,16 +366,11 @@ static void one_packet(dma_addr_t phys)
+       case CTL_FLAG_PERFORM_ABLK: {
+               struct ablkcipher_request *req = crypt->data.ablk_req;
+               struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
+-              int nents;
++
+               if (req_ctx->dst) {
+-                      nents = req_ctx->dst_nents;
+-                      dma_unmap_sg(dev, req->dst, nents, DMA_FROM_DEVICE);
+-                      free_buf_chain(req_ctx->dst, crypt->dst_buf);
+-                      src_direction = DMA_TO_DEVICE;
+-              }
+-              nents = req_ctx->src_nents;
+-              dma_unmap_sg(dev, req->src, nents, src_direction);
+-              free_buf_chain(req_ctx->src, crypt->src_buf);
++                      free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
++              }
++              free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+               req->base.complete(&req->base, failed);
+               break;
+       }
+@@ -750,56 +737,35 @@ static int setup_cipher(struct crypto_tf
+       return 0;
+ }
+-static int count_sg(struct scatterlist *sg, int nbytes)
+-{
+-      int i;
+-      for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
+-              nbytes -= sg->length;
+-      return i;
+-}
+-
+-static struct buffer_desc *chainup_buffers(struct scatterlist *sg,
+-                      unsigned nbytes, struct buffer_desc *buf, gfp_t flags)
++static struct buffer_desc *chainup_buffers(struct device *dev,
++              struct scatterlist *sg, unsigned nbytes,
++              struct buffer_desc *buf, gfp_t flags,
++              enum dma_data_direction dir)
+ {
+-      int nents = 0;
+-
+-      while (nbytes > 0) {
++      for (;nbytes > 0; sg = scatterwalk_sg_next(sg)) {
++              unsigned len = min(nbytes, sg->length);
+               struct buffer_desc *next_buf;
+               u32 next_buf_phys;
+-              unsigned len = min(nbytes, sg_dma_len(sg));
++              void *ptr;
+-              nents++;
+               nbytes -= len;
+-              if (!buf->phys_addr) {
+-                      buf->phys_addr = sg_dma_address(sg);
+-                      buf->buf_len = len;
+-                      buf->next = NULL;
+-                      buf->phys_next = 0;
+-                      goto next;
+-              }
+-              /* Two consecutive chunks on one page may be handled by the old
+-               * buffer descriptor, increased by the length of the new one
+-               */
+-              if (sg_dma_address(sg) == buf->phys_addr + buf->buf_len) {
+-                      buf->buf_len += len;
+-                      goto next;
+-              }
++              ptr = page_address(sg_page(sg)) + sg->offset;
+               next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
+-              if (!next_buf)
+-                      return NULL;
++              if (!next_buf) {
++                      buf = NULL;
++                      break;
++              }
++              sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
+               buf->next = next_buf;
+               buf->phys_next = next_buf_phys;
+-
+               buf = next_buf;
+-              buf->next = NULL;
+-              buf->phys_next = 0;
++
+               buf->phys_addr = sg_dma_address(sg);
+               buf->buf_len = len;
+-next:
+-              if (nbytes > 0) {
+-                      sg = sg_next(sg);
+-              }
++              buf->dir = dir;
+       }
++      buf->next = NULL;
++      buf->phys_next = 0;
+       return buf;
+ }
+@@ -860,12 +826,12 @@ static int ablk_perform(struct ablkciphe
+       struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+       struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+       unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
+-      int ret = -ENOMEM;
+       struct ix_sa_dir *dir;
+       struct crypt_ctl *crypt;
+-      unsigned int nbytes = req->nbytes, nents;
++      unsigned int nbytes = req->nbytes;
+       enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
+       struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
++      struct buffer_desc src_hook;
+       gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+                               GFP_KERNEL : GFP_ATOMIC;
+@@ -878,7 +844,7 @@ static int ablk_perform(struct ablkciphe
+       crypt = get_crypt_desc();
+       if (!crypt)
+-              return ret;
++              return -ENOMEM;
+       crypt->data.ablk_req = req;
+       crypt->crypto_ctx = dir->npe_ctx_phys;
+@@ -891,53 +857,41 @@ static int ablk_perform(struct ablkciphe
+       BUG_ON(ivsize && !req->info);
+       memcpy(crypt->iv, req->info, ivsize);
+       if (req->src != req->dst) {
++              struct buffer_desc dst_hook;
+               crypt->mode |= NPE_OP_NOT_IN_PLACE;
+-              nents = count_sg(req->dst, nbytes);
+               /* This was never tested by Intel
+                * for more than one dst buffer, I think. */
+-              BUG_ON(nents != 1);
+-              req_ctx->dst_nents = nents;
+-              dma_map_sg(dev, req->dst, nents, DMA_FROM_DEVICE);
+-              req_ctx->dst = dma_pool_alloc(buffer_pool, flags,&crypt->dst_buf);
+-              if (!req_ctx->dst)
+-                      goto unmap_sg_dest;
+-              req_ctx->dst->phys_addr = 0;
+-              if (!chainup_buffers(req->dst, nbytes, req_ctx->dst, flags))
++              BUG_ON(req->dst->length < nbytes);
++              req_ctx->dst = NULL;
++              if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
++                                      flags, DMA_FROM_DEVICE))
+                       goto free_buf_dest;
+               src_direction = DMA_TO_DEVICE;
++              req_ctx->dst = dst_hook.next;
++              crypt->dst_buf = dst_hook.phys_next;
+       } else {
+               req_ctx->dst = NULL;
+-              req_ctx->dst_nents = 0;
+       }
+-      nents = count_sg(req->src, nbytes);
+-      req_ctx->src_nents = nents;
+-      dma_map_sg(dev, req->src, nents, src_direction);
+-
+-      req_ctx->src = dma_pool_alloc(buffer_pool, flags, &crypt->src_buf);
+-      if (!req_ctx->src)
+-              goto unmap_sg_src;
+-      req_ctx->src->phys_addr = 0;
+-      if (!chainup_buffers(req->src, nbytes, req_ctx->src, flags))
++      req_ctx->src = NULL;
++      if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
++                              flags, src_direction))
+               goto free_buf_src;
++      req_ctx->src = src_hook.next;
++      crypt->src_buf = src_hook.phys_next;
+       crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
+       qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
+       BUG_ON(qmgr_stat_overflow(SEND_QID));
+       return -EINPROGRESS;
+ free_buf_src:
+-      free_buf_chain(req_ctx->src, crypt->src_buf);
+-unmap_sg_src:
+-      dma_unmap_sg(dev, req->src, req_ctx->src_nents, src_direction);
++      free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+ free_buf_dest:
+       if (req->src != req->dst) {
+-              free_buf_chain(req_ctx->dst, crypt->dst_buf);
+-unmap_sg_dest:
+-              dma_unmap_sg(dev, req->src, req_ctx->dst_nents,
+-                      DMA_FROM_DEVICE);
++              free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
+       }
+       crypt->ctl_flags = CTL_FLAG_UNUSED;
+-      return ret;
++      return -ENOMEM;
+ }
+ static int ablk_encrypt(struct ablkcipher_request *req)
+@@ -985,7 +939,7 @@ static int hmac_inconsistent(struct scat
+                       break;
+               offset += sg->length;
+-              sg = sg_next(sg);
++              sg = scatterwalk_sg_next(sg);
+       }
+       return (start + nbytes > offset + sg->length);
+ }
+@@ -997,11 +951,10 @@ static int aead_perform(struct aead_requ
+       struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
+       unsigned ivsize = crypto_aead_ivsize(tfm);
+       unsigned authsize = crypto_aead_authsize(tfm);
+-      int ret = -ENOMEM;
+       struct ix_sa_dir *dir;
+       struct crypt_ctl *crypt;
+-      unsigned int cryptlen, nents;
+-      struct buffer_desc *buf;
++      unsigned int cryptlen;
++      struct buffer_desc *buf, src_hook;
+       struct aead_ctx *req_ctx = aead_request_ctx(req);
+       gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+                               GFP_KERNEL : GFP_ATOMIC;
+@@ -1022,7 +975,7 @@ static int aead_perform(struct aead_requ
+       }
+       crypt = get_crypt_desc();
+       if (!crypt)
+-              return ret;
++              return -ENOMEM;
+       crypt->data.aead_req = req;
+       crypt->crypto_ctx = dir->npe_ctx_phys;
+@@ -1041,31 +994,27 @@ static int aead_perform(struct aead_requ
+               BUG(); /* -ENOTSUP because of my lazyness */
+       }
+-      req_ctx->buffer = dma_pool_alloc(buffer_pool, flags, &crypt->src_buf);
+-      if (!req_ctx->buffer)
+-              goto out;
+-      req_ctx->buffer->phys_addr = 0;
+       /* ASSOC data */
+-      nents = count_sg(req->assoc, req->assoclen);
+-      req_ctx->assoc_nents = nents;
+-      dma_map_sg(dev, req->assoc, nents, DMA_TO_DEVICE);
+-      buf = chainup_buffers(req->assoc, req->assoclen, req_ctx->buffer,flags);
++      buf = chainup_buffers(dev, req->assoc, req->assoclen, &src_hook,
++              flags, DMA_TO_DEVICE);
++      req_ctx->buffer = src_hook.next;
++      crypt->src_buf = src_hook.phys_next;
+       if (!buf)
+-              goto unmap_sg_assoc;
++              goto out;
+       /* IV */
+       sg_init_table(&req_ctx->ivlist, 1);
+       sg_set_buf(&req_ctx->ivlist, iv, ivsize);
+-      dma_map_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
+-      buf = chainup_buffers(&req_ctx->ivlist, ivsize, buf, flags);
++      buf = chainup_buffers(dev, &req_ctx->ivlist, ivsize, buf, flags,
++                      DMA_BIDIRECTIONAL);
+       if (!buf)
+-              goto unmap_sg_iv;
++              goto free_chain;
+       if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) {
+               /* The 12 hmac bytes are scattered,
+                * we need to copy them into a safe buffer */
+               req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
+                               &crypt->icv_rev_aes);
+               if (unlikely(!req_ctx->hmac_virt))
+-                      goto unmap_sg_iv;
++                      goto free_chain;
+               if (!encrypt) {
+                       scatterwalk_map_and_copy(req_ctx->hmac_virt,
+                               req->src, cryptlen, authsize, 0);
+@@ -1075,33 +1024,28 @@ static int aead_perform(struct aead_requ
+               req_ctx->hmac_virt = NULL;
+       }
+       /* Crypt */
+-      nents = count_sg(req->src, cryptlen + authsize);
+-      req_ctx->src_nents = nents;
+-      dma_map_sg(dev, req->src, nents, DMA_BIDIRECTIONAL);
+-      buf = chainup_buffers(req->src, cryptlen + authsize, buf, flags);
++      buf = chainup_buffers(dev, req->src, cryptlen + authsize, buf, flags,
++                      DMA_BIDIRECTIONAL);
+       if (!buf)
+-              goto unmap_sg_src;
++              goto free_hmac_virt;
+       if (!req_ctx->hmac_virt) {
+               crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize;
+       }
++
+       crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
+       qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
+       BUG_ON(qmgr_stat_overflow(SEND_QID));
+       return -EINPROGRESS;
+-unmap_sg_src:
+-      dma_unmap_sg(dev, req->src, req_ctx->src_nents, DMA_BIDIRECTIONAL);
++free_hmac_virt:
+       if (req_ctx->hmac_virt) {
+               dma_pool_free(buffer_pool, req_ctx->hmac_virt,
+                               crypt->icv_rev_aes);
+       }
+-unmap_sg_iv:
+-      dma_unmap_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
+-unmap_sg_assoc:
+-      dma_unmap_sg(dev, req->assoc, req_ctx->assoc_nents, DMA_TO_DEVICE);
+-      free_buf_chain(req_ctx->buffer, crypt->src_buf);
++free_chain:
++      free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
+ out:
+       crypt->ctl_flags = CTL_FLAG_UNUSED;
+-      return ret;
++      return -ENOMEM;
+ }
+ static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
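
The core of the ixp4xx change is replacing a single dma_map_sg() call, which could not cope with chained scatterlists here, with a per-chunk dma_map_single() performed while the driver walks the list itself; the handle and direction are stored in each buffer_desc so free_buf_chain() can undo the mapping. A simplified sketch of that pattern is shown below, assuming a possibly chained scatterlist; it uses sg_next() where the driver uses the crypto scatterwalk helper, and the function name and error handling are illustrative.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

static int map_sg_chunks(struct device *dev, struct scatterlist *sg,
			 unsigned int nbytes, enum dma_data_direction dir)
{
	for (; nbytes > 0; sg = sg_next(sg)) {
		unsigned int len = min(nbytes, sg->length);
		void *ptr = page_address(sg_page(sg)) + sg->offset;
		dma_addr_t addr;

		addr = dma_map_single(dev, ptr, len, dir);
		if (dma_mapping_error(dev, addr))
			return -ENOMEM;	/* caller unwinds chunks mapped so far */
		/* the driver records addr, len and dir in a buffer_desc so
		 * free_buf_chain() can dma_unmap_single() each chunk later */
		nbytes -= len;
	}
	return 0;
}
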
diff --git a/queue-2.6.29/exit_notify-kill-the-wrong-capable-check.patch b/queue-2.6.29/exit_notify-kill-the-wrong-capable-check.patch
new file mode 100644 (file)
index 0000000..93ab3fd
--- /dev/null
@@ -0,0 +1,38 @@
+From 432870dab85a2f69dc417022646cb9a70acf7f94 Mon Sep 17 00:00:00 2001
+From: Oleg Nesterov <oleg@redhat.com>
+Date: Mon, 6 Apr 2009 16:16:02 +0200
+Subject: exit_notify: kill the wrong capable(CAP_KILL) check (CVE-2009-1337)
+
+From: Oleg Nesterov <oleg@redhat.com>
+
+CVE-2009-1337
+
+commit 432870dab85a2f69dc417022646cb9a70acf7f94 upstream.
+
+The CAP_KILL check in exit_notify() looks just wrong, kill it.
+
+Whatever logic we have to reset ->exit_signal, the malicious user
+can bypass it if it execs the setuid application before exiting.
+
+Signed-off-by: Oleg Nesterov <oleg@redhat.com>
+Acked-by: Serge Hallyn <serue@us.ibm.com>
+Acked-by: Roland McGrath <roland@redhat.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/exit.c |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -950,8 +950,7 @@ static void exit_notify(struct task_stru
+        */
+       if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) &&
+           (tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
+-           tsk->self_exec_id != tsk->parent_exec_id) &&
+-          !capable(CAP_KILL))
++           tsk->self_exec_id != tsk->parent_exec_id))
+               tsk->exit_signal = SIGCHLD;
+       signal = tracehook_notify_death(tsk, &cookie, group_dead);
diff --git a/queue-2.6.29/pci-fix-incorrect-mask-of-pm-no_soft_reset-bit.patch b/queue-2.6.29/pci-fix-incorrect-mask-of-pm-no_soft_reset-bit.patch
new file mode 100644 (file)
index 0000000..0de3df7
--- /dev/null
@@ -0,0 +1,29 @@
+From 998dd7c719f62dcfa91d7bf7f4eb9c160e03d817 Mon Sep 17 00:00:00 2001
+From: Yu Zhao <yu.zhao@intel.com>
+Date: Wed, 25 Feb 2009 13:15:52 +0800
+Subject: PCI: fix incorrect mask of PM No_Soft_Reset bit
+
+From: Yu Zhao <yu.zhao@intel.com>
+
+commit 998dd7c719f62dcfa91d7bf7f4eb9c160e03d817 upstream.
+
+Reviewed-by: Matthew Wilcox <matthew@wil.cx>
+Signed-off-by: Yu Zhao <yu.zhao@intel.com>
+Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/linux/pci_regs.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/linux/pci_regs.h
++++ b/include/linux/pci_regs.h
+@@ -235,7 +235,7 @@
+ #define  PCI_PM_CAP_PME_SHIFT 11      /* Start of the PME Mask in PMC */
+ #define PCI_PM_CTRL           4       /* PM control and status register */
+ #define  PCI_PM_CTRL_STATE_MASK       0x0003  /* Current power state (D0 to D3) */
+-#define  PCI_PM_CTRL_NO_SOFT_RESET    0x0004  /* No reset for D3hot->D0 */
++#define  PCI_PM_CTRL_NO_SOFT_RESET    0x0008  /* No reset for D3hot->D0 */
+ #define  PCI_PM_CTRL_PME_ENABLE       0x0100  /* PME pin enable */
+ #define  PCI_PM_CTRL_DATA_SEL_MASK    0x1e00  /* Data select (??) */
+ #define  PCI_PM_CTRL_DATA_SCALE_MASK  0x6000  /* Data scale (??) */
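
No_Soft_Reset sits in bit 3 of the PMCSR register, so 0x0008 is the correct mask; the old 0x0004 value selected a reserved bit. A hedged sketch of how the corrected definition would typically be consumed is below; pm_no_soft_reset() is a made-up helper, while the capability lookup and config-space accessor are the standard ones.

#include <linux/pci.h>

static bool pm_no_soft_reset(struct pci_dev *dev)
{
	int pm = pci_find_capability(dev, PCI_CAP_ID_PM);
	u16 pmcsr;

	if (!pm)
		return false;
	pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);
	/* bit 3 set: device context is preserved across D3hot -> D0 */
	return pmcsr & PCI_PM_CTRL_NO_SOFT_RESET;
}
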
diff --git a/queue-2.6.29/series b/queue-2.6.29/series
index e1617abb2df17fcaeba02a79319c0c43cc38b4ac..f1fa000ea7b60c7789d907ddb61b3b8acd0d5dba 100644 (file)
@@ -11,3 +11,8 @@ kvm-fix-overlapping-check-for-memory-slots.patch
 kvm-x86-release-time_page-on-vcpu-destruction.patch
 usb-unusual-device-support-for-gold-mp3-player-energy.patch
 virtio-rng-remove-false-bug-for-spurious-callbacks.patch
+b44-use-kernel-dma-addresses-for-the-kernel-dma-api.patch
+block-include-empty-disks-in-proc-diskstats.patch
+crypto-ixp4xx-fix-handling-of-chained-sg-buffers.patch
+exit_notify-kill-the-wrong-capable-check.patch
+pci-fix-incorrect-mask-of-pm-no_soft_reset-bit.patch