git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.10-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 13 Aug 2015 23:32:13 +0000 (16:32 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 13 Aug 2015 23:32:13 +0000 (16:32 -0700)
added patches:
crypto-ixp4xx-remove-bogus-bug_on-on-scattered-dst-buffer.patch
rbd-fix-copyup-completion-race.patch
x86-xen-probe-target-addresses-in-set_aliased_prot-before-the-hypercall.patch
xen-gntdevt-fix-race-condition-in-gntdev_release.patch

queue-3.10/crypto-ixp4xx-remove-bogus-bug_on-on-scattered-dst-buffer.patch [new file with mode: 0644]
queue-3.10/rbd-fix-copyup-completion-race.patch [new file with mode: 0644]
queue-3.10/series
queue-3.10/x86-xen-probe-target-addresses-in-set_aliased_prot-before-the-hypercall.patch [new file with mode: 0644]
queue-3.10/xen-gntdevt-fix-race-condition-in-gntdev_release.patch [new file with mode: 0644]

diff --git a/queue-3.10/crypto-ixp4xx-remove-bogus-bug_on-on-scattered-dst-buffer.patch b/queue-3.10/crypto-ixp4xx-remove-bogus-bug_on-on-scattered-dst-buffer.patch
new file mode 100644
index 0000000..4e2eb8c
--- /dev/null
@@ -0,0 +1,30 @@
+From f898c522f0e9ac9f3177d0762b76e2ab2d2cf9c0 Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Wed, 22 Jul 2015 18:05:35 +0800
+Subject: crypto: ixp4xx - Remove bogus BUG_ON on scattered dst buffer
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit f898c522f0e9ac9f3177d0762b76e2ab2d2cf9c0 upstream.
+
+This patch removes a bogus BUG_ON in the ablkcipher path that
+triggers when the destination buffer is different from the source
+buffer and is scattered.
+
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/ixp4xx_crypto.c |    1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/crypto/ixp4xx_crypto.c
++++ b/drivers/crypto/ixp4xx_crypto.c
+@@ -915,7 +915,6 @@ static int ablk_perform(struct ablkciphe
+               crypt->mode |= NPE_OP_NOT_IN_PLACE;
+               /* This was never tested by Intel
+                * for more than one dst buffer, I think. */
+-              BUG_ON(req->dst->length < nbytes);
+               req_ctx->dst = NULL;
+               if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
+                                       flags, DMA_FROM_DEVICE))
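A minimal sketch of why the check was bogus (illustrative only, not part
of the patch; it assumes nothing beyond the standard
<linux/scatterlist.h> helpers): req->dst->length is the length of the
first scatterlist segment only, so for a scattered destination buffer it
can legitimately be smaller than nbytes even though the buffer as a
whole is large enough.

#include <linux/scatterlist.h>

/* Sum the lengths of all segments; the removed BUG_ON compared nbytes
 * against req->dst->length, i.e. against the first segment alone. */
static unsigned int sg_total_length(struct scatterlist *sg)
{
	unsigned int total = 0;

	for (; sg; sg = sg_next(sg))
		total += sg->length;
	return total;
}

/* With a scattered dst, req->dst->length < nbytes can hold while
 * sg_total_length(req->dst) >= nbytes, so the assertion fired on
 * perfectly valid requests. */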
diff --git a/queue-3.10/rbd-fix-copyup-completion-race.patch b/queue-3.10/rbd-fix-copyup-completion-race.patch
new file mode 100644
index 0000000..316d60b
--- /dev/null
@@ -0,0 +1,143 @@
+From 2761713d35e370fd640b5781109f753066b746c4 Mon Sep 17 00:00:00 2001
+From: Ilya Dryomov <idryomov@gmail.com>
+Date: Thu, 16 Jul 2015 17:36:11 +0300
+Subject: rbd: fix copyup completion race
+
+From: Ilya Dryomov <idryomov@gmail.com>
+
+commit 2761713d35e370fd640b5781109f753066b746c4 upstream.
+
+For write/discard obj_requests that involved a copyup method call, the
+opcode of the first op is CEPH_OSD_OP_CALL and the ->callback is
+rbd_img_obj_copyup_callback().  The latter frees copyup pages, sets
+->xferred and delegates to rbd_img_obj_callback(), the "normal" image
+object callback, for reporting to block layer and putting refs.
+
+rbd_osd_req_callback() however treats CEPH_OSD_OP_CALL as a trivial op,
+which means obj_request is marked done in rbd_osd_trivial_callback(),
+*before* ->callback is invoked and rbd_img_obj_copyup_callback() has
+a chance to run.  Marking obj_request done essentially means giving
+rbd_img_obj_callback() a license to end it at any moment, so if another
+obj_request from the same img_request is being completed concurrently,
+rbd_img_obj_end_request() may very well be called on such a prematurely
+marked-done request:
+
+<obj_request-1/2 reply>
+handle_reply()
+  rbd_osd_req_callback()
+    rbd_osd_trivial_callback()
+    rbd_obj_request_complete()
+    rbd_img_obj_copyup_callback()
+    rbd_img_obj_callback()
+                                    <obj_request-2/2 reply>
+                                    handle_reply()
+                                      rbd_osd_req_callback()
+                                        rbd_osd_trivial_callback()
+      for_each_obj_request(obj_request->img_request) {
+        rbd_img_obj_end_request(obj_request-1/2)
+        rbd_img_obj_end_request(obj_request-2/2) <--
+      }
+
+Calling rbd_img_obj_end_request() on such a request leads to trouble,
+in particular because its ->xferred is 0.  We report 0 to the block
+layer with blk_update_request(), get back 1 for "this request has more
+data in flight" and then trip on
+
+    rbd_assert(more ^ (which == img_request->obj_request_count));
+
+with the rhs (which == ...) being 1 because rbd_img_obj_end_request() has
+been called for both requests, and the lhs (more) being 1 because we haven't
+had a chance to set ->xferred in rbd_img_obj_copyup_callback() yet.
+
+To fix this, leverage the fact that rbd wants to call class methods in
+only two cases: one is a generic method call wrapper (obj_request is
+standalone) and the other is a copyup (obj_request is part of an
+img_request).  So
+make a dedicated handler for CEPH_OSD_OP_CALL and directly invoke
+rbd_img_obj_copyup_callback() from it if obj_request is part of an
+img_request, similar to how CEPH_OSD_OP_READ handler invokes
+rbd_img_obj_request_read_callback().
+
+Since rbd_img_obj_copyup_callback() is now being called from the OSD
+request callback (only), it is renamed to rbd_osd_copyup_callback().
+
+Cc: Alex Elder <elder@linaro.org>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Reviewed-by: Alex Elder <elder@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/block/rbd.c |   22 +++++++++++++++++-----
+ 1 file changed, 17 insertions(+), 5 deletions(-)
+
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -457,6 +457,7 @@ void rbd_warn(struct rbd_device *rbd_dev
+ #  define rbd_assert(expr)    ((void) 0)
+ #endif /* !RBD_DEBUG */
++static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
+ static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
+ static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
+ static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
+@@ -1670,6 +1671,16 @@ static void rbd_osd_stat_callback(struct
+       obj_request_done_set(obj_request);
+ }
++static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
++{
++      dout("%s: obj %p\n", __func__, obj_request);
++
++      if (obj_request_img_data_test(obj_request))
++              rbd_osd_copyup_callback(obj_request);
++      else
++              obj_request_done_set(obj_request);
++}
++
+ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
+                               struct ceph_msg *msg)
+ {
+@@ -1708,6 +1719,8 @@ static void rbd_osd_req_callback(struct
+               rbd_osd_stat_callback(obj_request);
+               break;
+       case CEPH_OSD_OP_CALL:
++              rbd_osd_call_callback(obj_request);
++              break;
+       case CEPH_OSD_OP_NOTIFY_ACK:
+       case CEPH_OSD_OP_WATCH:
+               rbd_osd_trivial_callback(obj_request);
+@@ -2305,13 +2318,15 @@ out_unwind:
+ }
+ static void
+-rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
++rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
+ {
+       struct rbd_img_request *img_request;
+       struct rbd_device *rbd_dev;
+       struct page **pages;
+       u32 page_count;
++      dout("%s: obj %p\n", __func__, obj_request);
++
+       rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
+       rbd_assert(obj_request_img_data_test(obj_request));
+       img_request = obj_request->img_request;
+@@ -2337,9 +2352,7 @@ rbd_img_obj_copyup_callback(struct rbd_o
+       if (!obj_request->result)
+               obj_request->xferred = obj_request->length;
+-      /* Finish up with the normal image object callback */
+-
+-      rbd_img_obj_callback(obj_request);
++      obj_request_done_set(obj_request);
+ }
+ static void
+@@ -2436,7 +2449,6 @@ rbd_img_obj_parent_read_full_callback(st
+       /* All set, send it off. */
+-      orig_request->callback = rbd_img_obj_copyup_callback;
+       osdc = &rbd_dev->rbd_client->client->osdc;
+       img_result = rbd_obj_request_submit(osdc, orig_request);
+       if (!img_result)
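To make the assertion arithmetic in the message above concrete, a worked
sketch (values illustrative; more/last stand in for the expressions in
rbd_img_obj_callback()):

/* State when obj_request-2/2 completes after obj_request-1/2 was ended
 * prematurely with ->xferred == 0: */
bool more = true;  /* blk_update_request() was handed 0 bytes and
                    * answered "this request has more data in flight" */
bool last = true;  /* which == img_request->obj_request_count, because
                    * rbd_img_obj_end_request() already ran for both */

rbd_assert(more ^ last);  /* 1 ^ 1 == 0 -> the assertion trips */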
diff --git a/queue-3.10/series b/queue-3.10/series
index 828cbe74edfa50cc7ecd2903bf8279b14a9f9132..7f6cbe067d8afeb34fc757c75421096e34d3a94c 100644
@@ -14,3 +14,7 @@ mfd-sm501-dbg_regs-attribute-must-be-read-only.patch
 perf-x86-amd-rework-amd-pmu-init-code.patch
 sparc64-fix-fpu-register-corruption-with-aes-crypto-offload.patch
 sparc64-fix-userspace-fpu-register-corruptions.patch
+x86-xen-probe-target-addresses-in-set_aliased_prot-before-the-hypercall.patch
+xen-gntdevt-fix-race-condition-in-gntdev_release.patch
+crypto-ixp4xx-remove-bogus-bug_on-on-scattered-dst-buffer.patch
+rbd-fix-copyup-completion-race.patch
diff --git a/queue-3.10/x86-xen-probe-target-addresses-in-set_aliased_prot-before-the-hypercall.patch b/queue-3.10/x86-xen-probe-target-addresses-in-set_aliased_prot-before-the-hypercall.patch
new file mode 100644
index 0000000..5e39dd5
--- /dev/null
@@ -0,0 +1,115 @@
+From aa1acff356bbedfd03b544051f5b371746735d89 Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@kernel.org>
+Date: Thu, 30 Jul 2015 14:31:31 -0700
+Subject: x86/xen: Probe target addresses in set_aliased_prot() before the hypercall
+
+From: Andy Lutomirski <luto@kernel.org>
+
+commit aa1acff356bbedfd03b544051f5b371746735d89 upstream.
+
+The update_va_mapping hypercall can fail if the VA isn't present
+in the guest's page tables.  Under certain loads, this can
+result in an OOPS when the target address is in unpopulated vmap
+space.
+
+While we're at it, add comments to help explain what's going on.
+
+This isn't a great long-term fix.  This code should probably be
+changed to use something like set_memory_ro.
+
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Cc: Andrew Cooper <andrew.cooper3@citrix.com>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: David Vrabel <dvrabel@cantab.net>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Jan Beulich <jbeulich@suse.com>
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sasha Levin <sasha.levin@oracle.com>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: security@kernel.org <security@kernel.org>
+Cc: xen-devel <xen-devel@lists.xen.org>
+Link: http://lkml.kernel.org/r/0b0e55b995cda11e7829f140b833ef932fcabe3a.1438291540.git.luto@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/xen/enlighten.c |   40 ++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 40 insertions(+)
+
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -481,6 +481,7 @@ static void set_aliased_prot(void *v, pg
+       pte_t pte;
+       unsigned long pfn;
+       struct page *page;
++      unsigned char dummy;
+       ptep = lookup_address((unsigned long)v, &level);
+       BUG_ON(ptep == NULL);
+@@ -490,6 +491,32 @@ static void set_aliased_prot(void *v, pg
+       pte = pfn_pte(pfn, prot);
++      /*
++       * Careful: update_va_mapping() will fail if the virtual address
++       * we're poking isn't populated in the page tables.  We don't
++       * need to worry about the direct map (that's always in the page
++       * tables), but we need to be careful about vmap space.  In
++       * particular, the top level page table can lazily propagate
++       * entries between processes, so if we've switched mms since we
++       * vmapped the target in the first place, we might not have the
++       * top-level page table entry populated.
++       *
++       * We disable preemption because we want the same mm active when
++       * we probe the target and when we issue the hypercall.  We'll
++       * have the same nominal mm, but if we're a kernel thread, lazy
++       * mm dropping could change our pgd.
++       *
++       * Out of an abundance of caution, this uses __get_user() to fault
++       * in the target address just in case there's some obscure case
++       * in which the target address isn't readable.
++       */
++
++      preempt_disable();
++
++      pagefault_disable();    /* Avoid warnings due to being atomic. */
++      __get_user(dummy, (unsigned char __user __force *)v);
++      pagefault_enable();
++
+       if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
+               BUG();
+@@ -501,6 +528,8 @@ static void set_aliased_prot(void *v, pg
+                               BUG();
+       } else
+               kmap_flush_unused();
++
++      preempt_enable();
+ }
+ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
+@@ -508,6 +537,17 @@ static void xen_alloc_ldt(struct desc_st
+       const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
+       int i;
++      /*
++       * We need to mark the all aliases of the LDT pages RO.  We
++       * don't need to call vm_flush_aliases(), though, since that's
++       * only responsible for flushing aliases out the TLBs, not the
++       * page tables, and Xen will flush the TLB for us if needed.
++       *
++       * To avoid confusing future readers: none of this is necessary
++       * to load the LDT.  The hypervisor only checks this when the
++       * LDT is faulted in due to subsequent descriptor access.
++       */
++
+       for(i = 0; i < entries; i += entries_per_page)
+               set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
+ }
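The message above points at set_memory_ro() as the longer-term
replacement; a hypothetical, untested sketch of that direction (the
helper name is made up, and whether this covers every alias the
hypervisor checks is exactly the open question the message raises):

#include <linux/kernel.h>
#include <asm/cacheflush.h>	/* set_memory_ro() on x86 */

/* Hypothetical direction only, not part of the patch: set_memory_ro()
 * takes a VA and a page count and handles the page-table walk itself,
 * which would make the manual probe above unnecessary. */
static void ldt_set_ro(struct desc_struct *ldt, unsigned int entries)
{
	unsigned long nr_pages =
		DIV_ROUND_UP(entries * LDT_ENTRY_SIZE, PAGE_SIZE);

	set_memory_ro((unsigned long)ldt, nr_pages);
}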
diff --git a/queue-3.10/xen-gntdevt-fix-race-condition-in-gntdev_release.patch b/queue-3.10/xen-gntdevt-fix-race-condition-in-gntdev_release.patch
new file mode 100644
index 0000000..63aadc1
--- /dev/null
@@ -0,0 +1,44 @@
+From 30b03d05e07467b8c6ec683ea96b5bffcbcd3931 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?=
+ <marmarek@invisiblethingslab.com>
+Date: Fri, 26 Jun 2015 03:28:24 +0200
+Subject: xen/gntdevt: Fix race condition in gntdev_release()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
+
+commit 30b03d05e07467b8c6ec683ea96b5bffcbcd3931 upstream.
+
+While gntdev_release() is called the MMU notifier is still registered
+and can traverse the priv->maps list even if no pages are mapped (which
+is the case -- gntdev_release() is called, after all). But
+gntdev_release() will clear that list, so make sure that only one of
+those things happens at the same time.
+
+Signed-off-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
+Signed-off-by: David Vrabel <david.vrabel@citrix.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/xen/gntdev.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/xen/gntdev.c
++++ b/drivers/xen/gntdev.c
+@@ -534,12 +534,14 @@ static int gntdev_release(struct inode *
+       pr_debug("priv %p\n", priv);
++      mutex_lock(&priv->lock);
+       while (!list_empty(&priv->maps)) {
+               map = list_entry(priv->maps.next, struct grant_map, next);
+               list_del(&map->next);
+               gntdev_put_map(NULL /* already removed */, map);
+       }
+       WARN_ON(!list_empty(&priv->freeable_maps));
++      mutex_unlock(&priv->lock);
+       if (use_ptemod)
+               mmu_notifier_unregister(&priv->mn, priv->mm);
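A sketch of the race being closed (simplified; in this tree the notifier
side, mn_invl_range_start(), already walks priv->maps under the same
priv->lock -- the fix is that the release path now takes it too):

/*
 * Without the lock in gntdev_release():
 *
 *   CPU0: gntdev_release()        CPU1: mn_invl_range_start()
 *     list_del(&map->next);         mutex_lock(&priv->lock);
 *     gntdev_put_map(NULL, map);    list_for_each_entry(map,
 *                                       &priv->maps, next)
 *                                     ... use of a freed map ...
 *
 * With priv->lock held around the list teardown, the traversal and the
 * teardown are serialized, and the mmu_notifier_unregister() that
 * follows guarantees no further notifier callbacks once release
 * returns.
 */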