--- /dev/null
+From 9a258afa928b45e6dd2efcac46ccf7eea705d35a Mon Sep 17 00:00:00 2001
+From: Roger Quadros <rogerq@ti.com>
+Date: Thu, 16 Jul 2015 16:16:44 +0300
+Subject: ARM: OMAP2+: hwmod: Fix _wait_target_ready() for hwmods without sysc
+
+From: Roger Quadros <rogerq@ti.com>
+
+commit 9a258afa928b45e6dd2efcac46ccf7eea705d35a upstream.
+
+For hwmods without sysc, _init_mpu_rt_base(oh) won't be called and so
+_find_mpu_rt_port(oh) will return NULL thus preventing ready state check
+on those modules after the module is enabled.
+
+This can potentially cause a bus access error if the module is accessed
+before the module is ready.
+
+Fix this by unconditionally calling _init_mpu_rt_base() during hwmod
+_init(). Do ioremap only if we need SYSC access.
+
+Even though _wait_target_ready() check doesn't really need MPU RT port but
+just the PRCM registers, we still mandate that the hwmod must have an
+MPU RT port if ready state check needs to be done. Else it would mean that
+the module is not accessible by MPU so there is no point in waiting
+for target to be ready.
+
+e.g. this fixes the below DCAN bus access error on AM437x-gp-evm.
+
+[ 16.672978] ------------[ cut here ]------------
+[ 16.677885] WARNING: CPU: 0 PID: 1580 at drivers/bus/omap_l3_noc.c:147 l3_interrupt_handler+0x234/0x35c()
+[ 16.687946] 44000000.ocp:L3 Custom Error: MASTER M2 (64-bit) TARGET L4_PER_0 (Read): Data Access in User mode during Functional access
+[ 16.700654] Modules linked in: xhci_hcd btwilink ti_vpfe dwc3 videobuf2_core ov2659 bluetooth v4l2_common videodev ti_am335x_adc kfifo_buf industrialio c_can_platform videobuf2_dma_contig media snd_soc_tlv320aic3x pixcir_i2c_ts c_can dc
+[ 16.731144] CPU: 0 PID: 1580 Comm: rpc.statd Not tainted 3.14.26-02561-gf733aa036398 #180
+[ 16.739747] Backtrace:
+[ 16.742336] [<c0011108>] (dump_backtrace) from [<c00112a4>] (show_stack+0x18/0x1c)
+[ 16.750285] r6:00000093 r5:00000009 r4:eab5b8a8 r3:00000000
+[ 16.756252] [<c001128c>] (show_stack) from [<c05a4418>] (dump_stack+0x20/0x28)
+[ 16.763870] [<c05a43f8>] (dump_stack) from [<c0037120>] (warn_slowpath_common+0x6c/0x8c)
+[ 16.772408] [<c00370b4>] (warn_slowpath_common) from [<c00371e4>] (warn_slowpath_fmt+0x38/0x40)
+[ 16.781550] r8:c05d1f90 r7:c0730844 r6:c0730448 r5:80080003 r4:ed0cd210
+[ 16.788626] [<c00371b0>] (warn_slowpath_fmt) from [<c027fa94>] (l3_interrupt_handler+0x234/0x35c)
+[ 16.797968] r3:ed0cd480 r2:c0730508
+[ 16.801747] [<c027f860>] (l3_interrupt_handler) from [<c0063758>] (handle_irq_event_percpu+0x54/0x1bc)
+[ 16.811533] r10:ed005600 r9:c084855b r8:0000002a r7:00000000 r6:00000000 r5:0000002a
+[ 16.819780] r4:ed0e6d80
+[ 16.822453] [<c0063704>] (handle_irq_event_percpu) from [<c00638f0>] (handle_irq_event+0x30/0x40)
+[ 16.831789] r10:eb2b6938 r9:eb2b6960 r8:bf011420 r7:fa240100 r6:00000000 r5:0000002a
+[ 16.840052] r4:ed005600
+[ 16.842744] [<c00638c0>] (handle_irq_event) from [<c00661d8>] (handle_fasteoi_irq+0x74/0x128)
+[ 16.851702] r4:ed005600 r3:00000000
+[ 16.855479] [<c0066164>] (handle_fasteoi_irq) from [<c0063068>] (generic_handle_irq+0x28/0x38)
+[ 16.864523] r4:0000002a r3:c0066164
+[ 16.868294] [<c0063040>] (generic_handle_irq) from [<c000ef60>] (handle_IRQ+0x38/0x8c)
+[ 16.876612] r4:c081c640 r3:00000202
+[ 16.880380] [<c000ef28>] (handle_IRQ) from [<c00084f0>] (gic_handle_irq+0x30/0x5c)
+[ 16.888328] r6:eab5ba38 r5:c0804460 r4:fa24010c r3:00000100
+[ 16.894303] [<c00084c0>] (gic_handle_irq) from [<c05a8d80>] (__irq_svc+0x40/0x50)
+[ 16.902193] Exception stack(0xeab5ba38 to 0xeab5ba80)
+[ 16.907499] ba20: 00000000 00000006
+[ 16.916108] ba40: fa1d0000 fa1d0008 ed3d3000 eab5bab4 ed3d3460 c0842af4 bf011420 eb2b6960
+[ 16.924716] ba60: eb2b6938 eab5ba8c eab5ba90 eab5ba80 bf035220 bf07702c 600f0013 ffffffff
+[ 16.933317] r7:eab5ba6c r6:ffffffff r5:600f0013 r4:bf07702c
+[ 16.939317] [<bf077000>] (c_can_plat_read_reg_aligned_to_16bit [c_can_platform]) from [<bf035220>] (c_can_get_berr_counter+0x38/0x64 [c_can])
+[ 16.952696] [<bf0351e8>] (c_can_get_berr_counter [c_can]) from [<bf010294>] (can_fill_info+0x124/0x15c [can_dev])
+[ 16.963480] r5:ec8c9740 r4:ed3d3000
+[ 16.967253] [<bf010170>] (can_fill_info [can_dev]) from [<c0502fa8>] (rtnl_fill_ifinfo+0x58c/0x8fc)
+[ 16.976749] r6:ec8c9740 r5:ed3d3000 r4:eb2b6780
+[ 16.981613] [<c0502a1c>] (rtnl_fill_ifinfo) from [<c0503408>] (rtnl_dump_ifinfo+0xf0/0x1dc)
+[ 16.990401] r10:ec8c9740 r9:00000000 r8:00000000 r7:00000000 r6:ebd4d1b4 r5:ed3d3000
+[ 16.998671] r4:00000000
+[ 17.001342] [<c0503318>] (rtnl_dump_ifinfo) from [<c050e6e4>] (netlink_dump+0xa8/0x1e0)
+[ 17.009772] r10:00000000 r9:00000000 r8:c0503318 r7:ebf3e6c0 r6:ebd4d1b4 r5:ec8c9740
+[ 17.018050] r4:ebd4d000
+[ 17.020714] [<c050e63c>] (netlink_dump) from [<c050ec10>] (__netlink_dump_start+0x104/0x154)
+[ 17.029591] r6:eab5bd34 r5:ec8c9980 r4:ebd4d000
+[ 17.034454] [<c050eb0c>] (__netlink_dump_start) from [<c0505604>] (rtnetlink_rcv_msg+0x110/0x1f4)
+[ 17.043778] r7:00000000 r6:ec8c9980 r5:00000f40 r4:ebf3e6c0
+[ 17.049743] [<c05054f4>] (rtnetlink_rcv_msg) from [<c05108e8>] (netlink_rcv_skb+0xb4/0xc8)
+[ 17.058449] r8:eab5bdac r7:ec8c9980 r6:c05054f4 r5:ec8c9980 r4:ebf3e6c0
+[ 17.065534] [<c0510834>] (netlink_rcv_skb) from [<c0504134>] (rtnetlink_rcv+0x24/0x2c)
+[ 17.073854] r6:ebd4d000 r5:00000014 r4:ec8c9980 r3:c0504110
+[ 17.079846] [<c0504110>] (rtnetlink_rcv) from [<c05102ac>] (netlink_unicast+0x180/0x1ec)
+[ 17.088363] r4:ed0c6800 r3:c0504110
+[ 17.092113] [<c051012c>] (netlink_unicast) from [<c0510670>] (netlink_sendmsg+0x2ac/0x380)
+[ 17.100813] r10:00000000 r8:00000008 r7:ec8c9980 r6:ebd4d000 r5:eab5be70 r4:eab5bee4
+[ 17.109083] [<c05103c4>] (netlink_sendmsg) from [<c04dfdb4>] (sock_sendmsg+0x90/0xb0)
+[ 17.117305] r10:00000000 r9:eab5a000 r8:becdda3c r7:0000000c r6:ea978400 r5:eab5be70
+[ 17.125563] r4:c05103c4
+[ 17.128225] [<c04dfd24>] (sock_sendmsg) from [<c04e1c28>] (SyS_sendto+0xb8/0xdc)
+[ 17.136001] r6:becdda5c r5:00000014 r4:ecd37040
+[ 17.140876] [<c04e1b70>] (SyS_sendto) from [<c000e680>] (ret_fast_syscall+0x0/0x30)
+[ 17.148923] r10:00000000 r8:c000e804 r7:00000122 r6:becdda5c r5:0000000c r4:becdda5c
+[ 17.157169] ---[ end trace 2b71e15b38f58bad ]---
+
+Fixes: 6423d6df1440 ("ARM: OMAP2+: hwmod: check for module address space during init")
+Signed-off-by: Roger Quadros <rogerq@ti.com>
+Signed-off-by: Paul Walmsley <paul@pwsan.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mach-omap2/omap_hwmod.c | 24 ++++++++++++++++--------
+ 1 file changed, 16 insertions(+), 8 deletions(-)
+
+--- a/arch/arm/mach-omap2/omap_hwmod.c
++++ b/arch/arm/mach-omap2/omap_hwmod.c
+@@ -2452,6 +2452,9 @@ static int of_dev_hwmod_lookup(struct de
+ * registers. This address is needed early so the OCP registers that
+ * are part of the device's address space can be ioremapped properly.
+ *
++ * If SYSC access is not needed, the registers will not be remapped
++ * and non-availability of MPU access is not treated as an error.
++ *
+ * Returns 0 on success, -EINVAL if an invalid hwmod is passed, and
+ * -ENXIO on absent or invalid register target address space.
+ */
+@@ -2466,6 +2469,11 @@ static int __init _init_mpu_rt_base(stru
+
+ _save_mpu_port_index(oh);
+
++ /* if we don't need sysc access we don't need to ioremap */
++ if (!oh->class->sysc)
++ return 0;
++
++ /* we can't continue without MPU PORT if we need sysc access */
+ if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
+ return -ENXIO;
+
+@@ -2475,8 +2483,10 @@ static int __init _init_mpu_rt_base(stru
+ oh->name);
+
+ /* Extract the IO space from device tree blob */
+- if (!np)
++ if (!np) {
++ pr_err("omap_hwmod: %s: no dt node\n", oh->name);
+ return -ENXIO;
++ }
+
+ va_start = of_iomap(np, index + oh->mpu_rt_idx);
+ } else {
+@@ -2535,13 +2545,11 @@ static int __init _init(struct omap_hwmo
+ oh->name, np->name);
+ }
+
+- if (oh->class->sysc) {
+- r = _init_mpu_rt_base(oh, NULL, index, np);
+- if (r < 0) {
+- WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
+- oh->name);
+- return 0;
+- }
++ r = _init_mpu_rt_base(oh, NULL, index, np);
++ if (r < 0) {
++ WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
++ oh->name);
++ return 0;
+ }
+
+ r = _init_clocks(oh, NULL);
--- /dev/null
+From fa8173a3ef0570affde7da352de202190b3786c2 Mon Sep 17 00:00:00 2001
+From: Axel Lin <axel.lin@ingics.com>
+Date: Thu, 23 Jul 2015 23:22:26 +0800
+Subject: ASoC: pcm1681: Fix setting de-emphasis sampling rate selection
+
+From: Axel Lin <axel.lin@ingics.com>
+
+commit fa8173a3ef0570affde7da352de202190b3786c2 upstream.
+
+The de-emphasis sampling rate selection is controlled by BIT[3:4] of
+PCM1681_DEEMPH_CONTROL register. Do proper left shift to set it.
+
+Signed-off-by: Axel Lin <axel.lin@ingics.com>
+Acked-by: Marek Belisko <marek.belisko@streamunlimited.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/soc/codecs/pcm1681.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/sound/soc/codecs/pcm1681.c
++++ b/sound/soc/codecs/pcm1681.c
+@@ -102,7 +102,7 @@ static int pcm1681_set_deemph(struct snd
+
+ if (val != -1) {
+ regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL,
+- PCM1681_DEEMPH_RATE_MASK, val);
++ PCM1681_DEEMPH_RATE_MASK, val << 3);
+ enable = 1;
+ } else
+ enable = 0;
--- /dev/null
+From f898c522f0e9ac9f3177d0762b76e2ab2d2cf9c0 Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Wed, 22 Jul 2015 18:05:35 +0800
+Subject: crypto: ixp4xx - Remove bogus BUG_ON on scattered dst buffer
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit f898c522f0e9ac9f3177d0762b76e2ab2d2cf9c0 upstream.
+
+This patch removes a bogus BUG_ON in the ablkcipher path that
+triggers when the destination buffer is different from the source
+buffer and is scattered.
+
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/ixp4xx_crypto.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/crypto/ixp4xx_crypto.c
++++ b/drivers/crypto/ixp4xx_crypto.c
+@@ -904,7 +904,6 @@ static int ablk_perform(struct ablkciphe
+ crypt->mode |= NPE_OP_NOT_IN_PLACE;
+ /* This was never tested by Intel
+ * for more than one dst buffer, I think. */
+- BUG_ON(req->dst->length < nbytes);
+ req_ctx->dst = NULL;
+ if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
+ flags, DMA_FROM_DEVICE))
--- /dev/null
+From 2761713d35e370fd640b5781109f753066b746c4 Mon Sep 17 00:00:00 2001
+From: Ilya Dryomov <idryomov@gmail.com>
+Date: Thu, 16 Jul 2015 17:36:11 +0300
+Subject: rbd: fix copyup completion race
+
+From: Ilya Dryomov <idryomov@gmail.com>
+
+commit 2761713d35e370fd640b5781109f753066b746c4 upstream.
+
+For write/discard obj_requests that involved a copyup method call, the
+opcode of the first op is CEPH_OSD_OP_CALL and the ->callback is
+rbd_img_obj_copyup_callback(). The latter frees copyup pages, sets
+->xferred and delegates to rbd_img_obj_callback(), the "normal" image
+object callback, for reporting to block layer and putting refs.
+
+rbd_osd_req_callback() however treats CEPH_OSD_OP_CALL as a trivial op,
+which means obj_request is marked done in rbd_osd_trivial_callback(),
+*before* ->callback is invoked and rbd_img_obj_copyup_callback() has
+a chance to run. Marking obj_request done essentially means giving
+rbd_img_obj_callback() a license to end it at any moment, so if another
+obj_request from the same img_request is being completed concurrently,
+rbd_img_obj_end_request() may very well be called on such a prematurely
+marked done request:
+
+<obj_request-1/2 reply>
+handle_reply()
+ rbd_osd_req_callback()
+ rbd_osd_trivial_callback()
+ rbd_obj_request_complete()
+ rbd_img_obj_copyup_callback()
+ rbd_img_obj_callback()
+ <obj_request-2/2 reply>
+ handle_reply()
+ rbd_osd_req_callback()
+ rbd_osd_trivial_callback()
+ for_each_obj_request(obj_request->img_request) {
+ rbd_img_obj_end_request(obj_request-1/2)
+ rbd_img_obj_end_request(obj_request-2/2) <--
+ }
+
+Calling rbd_img_obj_end_request() on such a request leads to trouble,
+in particular because its ->xferred is 0.  We report 0 to the block
+layer with blk_update_request(), get back 1 for "this request has more
+data in flight" and then trip on
+
+ rbd_assert(more ^ (which == img_request->obj_request_count));
+
+with rhs (which == ...) being 1 because rbd_img_obj_end_request() has
+been called for both requests and lhs (more) being 1 because we haven't
+got a chance to set ->xferred in rbd_img_obj_copyup_callback() yet.
+
+To fix this, leverage that rbd wants to call class methods in only two
+cases: one is a generic method call wrapper (obj_request is standalone)
+and the other is a copyup (obj_request is part of an img_request). So
+make a dedicated handler for CEPH_OSD_OP_CALL and directly invoke
+rbd_img_obj_copyup_callback() from it if obj_request is part of an
+img_request, similar to how CEPH_OSD_OP_READ handler invokes
+rbd_img_obj_request_read_callback().
+
+Since rbd_img_obj_copyup_callback() is now being called from the OSD
+request callback (only), it is renamed to rbd_osd_copyup_callback().
+
+Cc: Alex Elder <elder@linaro.org>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Reviewed-by: Alex Elder <elder@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/block/rbd.c | 22 +++++++++++++++++-----
+ 1 file changed, 17 insertions(+), 5 deletions(-)
+
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -508,6 +508,7 @@ void rbd_warn(struct rbd_device *rbd_dev
+ # define rbd_assert(expr) ((void) 0)
+ #endif /* !RBD_DEBUG */
+
++static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
+ static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
+ static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
+ static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
+@@ -1651,6 +1652,16 @@ static void rbd_osd_stat_callback(struct
+ obj_request_done_set(obj_request);
+ }
+
++static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
++{
++ dout("%s: obj %p\n", __func__, obj_request);
++
++ if (obj_request_img_data_test(obj_request))
++ rbd_osd_copyup_callback(obj_request);
++ else
++ obj_request_done_set(obj_request);
++}
++
+ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
+ struct ceph_msg *msg)
+ {
+@@ -1689,6 +1700,8 @@ static void rbd_osd_req_callback(struct
+ rbd_osd_stat_callback(obj_request);
+ break;
+ case CEPH_OSD_OP_CALL:
++ rbd_osd_call_callback(obj_request);
++ break;
+ case CEPH_OSD_OP_NOTIFY_ACK:
+ case CEPH_OSD_OP_WATCH:
+ rbd_osd_trivial_callback(obj_request);
+@@ -2275,13 +2288,15 @@ out_unwind:
+ }
+
+ static void
+-rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
++rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
+ {
+ struct rbd_img_request *img_request;
+ struct rbd_device *rbd_dev;
+ struct page **pages;
+ u32 page_count;
+
++ dout("%s: obj %p\n", __func__, obj_request);
++
+ rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
+ rbd_assert(obj_request_img_data_test(obj_request));
+ img_request = obj_request->img_request;
+@@ -2307,9 +2322,7 @@ rbd_img_obj_copyup_callback(struct rbd_o
+ if (!obj_request->result)
+ obj_request->xferred = obj_request->length;
+
+- /* Finish up with the normal image object callback */
+-
+- rbd_img_obj_callback(obj_request);
++ obj_request_done_set(obj_request);
+ }
+
+ static void
+@@ -2406,7 +2419,6 @@ rbd_img_obj_parent_read_full_callback(st
+
+ /* All set, send it off. */
+
+- orig_request->callback = rbd_img_obj_copyup_callback;
+ osdc = &rbd_dev->rbd_client->client->osdc;
+ img_result = rbd_obj_request_submit(osdc, orig_request);
+ if (!img_result)
md-use-kzalloc-when-bitmap-is-disabled.patch
arm-sunxi-fix-build-for-thumb2_kernel.patch
sparc64-fix-userspace-fpu-register-corruptions.patch
+asoc-pcm1681-fix-setting-de-emphasis-sampling-rate-selection.patch
+x86-xen-probe-target-addresses-in-set_aliased_prot-before-the-hypercall.patch
+xen-gntdevt-fix-race-condition-in-gntdev_release.patch
+crypto-ixp4xx-remove-bogus-bug_on-on-scattered-dst-buffer.patch
+rbd-fix-copyup-completion-race.patch
+arm-omap2-hwmod-fix-_wait_target_ready-for-hwmods-without-sysc.patch
--- /dev/null
+From aa1acff356bbedfd03b544051f5b371746735d89 Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@kernel.org>
+Date: Thu, 30 Jul 2015 14:31:31 -0700
+Subject: x86/xen: Probe target addresses in set_aliased_prot() before the hypercall
+
+From: Andy Lutomirski <luto@kernel.org>
+
+commit aa1acff356bbedfd03b544051f5b371746735d89 upstream.
+
+The update_va_mapping hypercall can fail if the VA isn't present
+in the guest's page tables. Under certain loads, this can
+result in an OOPS when the target address is in unpopulated vmap
+space.
+
+While we're at it, add comments to help explain what's going on.
+
+This isn't a great long-term fix. This code should probably be
+changed to use something like set_memory_ro.
+
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Cc: Andrew Cooper <andrew.cooper3@citrix.com>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: David Vrabel <dvrabel@cantab.net>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Jan Beulich <jbeulich@suse.com>
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sasha Levin <sasha.levin@oracle.com>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: security@kernel.org <security@kernel.org>
+Cc: xen-devel <xen-devel@lists.xen.org>
+Link: http://lkml.kernel.org/r/0b0e55b995cda11e7829f140b833ef932fcabe3a.1438291540.git.luto@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/xen/enlighten.c | 40 ++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 40 insertions(+)
+
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -481,6 +481,7 @@ static void set_aliased_prot(void *v, pg
+ pte_t pte;
+ unsigned long pfn;
+ struct page *page;
++ unsigned char dummy;
+
+ ptep = lookup_address((unsigned long)v, &level);
+ BUG_ON(ptep == NULL);
+@@ -490,6 +491,32 @@ static void set_aliased_prot(void *v, pg
+
+ pte = pfn_pte(pfn, prot);
+
++ /*
++ * Careful: update_va_mapping() will fail if the virtual address
++ * we're poking isn't populated in the page tables. We don't
++ * need to worry about the direct map (that's always in the page
++ * tables), but we need to be careful about vmap space. In
++ * particular, the top level page table can lazily propagate
++ * entries between processes, so if we've switched mms since we
++ * vmapped the target in the first place, we might not have the
++ * top-level page table entry populated.
++ *
++ * We disable preemption because we want the same mm active when
++ * we probe the target and when we issue the hypercall. We'll
++ * have the same nominal mm, but if we're a kernel thread, lazy
++ * mm dropping could change our pgd.
++ *
++ * Out of an abundance of caution, this uses __get_user() to fault
++ * in the target address just in case there's some obscure case
++ * in which the target address isn't readable.
++ */
++
++ preempt_disable();
++
++ pagefault_disable(); /* Avoid warnings due to being atomic. */
++ __get_user(dummy, (unsigned char __user __force *)v);
++ pagefault_enable();
++
+ if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
+ BUG();
+
+@@ -501,6 +528,8 @@ static void set_aliased_prot(void *v, pg
+ BUG();
+ } else
+ kmap_flush_unused();
++
++ preempt_enable();
+ }
+
+ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
+@@ -508,6 +537,17 @@ static void xen_alloc_ldt(struct desc_st
+ const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
+ int i;
+
++ /*
++ * We need to mark the all aliases of the LDT pages RO. We
++ * don't need to call vm_flush_aliases(), though, since that's
++ * only responsible for flushing aliases out the TLBs, not the
++ * page tables, and Xen will flush the TLB for us if needed.
++ *
++ * To avoid confusing future readers: none of this is necessary
++ * to load the LDT. The hypervisor only checks this when the
++ * LDT is faulted in due to subsequent descriptor access.
++ */
++
+ for(i = 0; i < entries; i += entries_per_page)
+ set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
+ }
--- /dev/null
+From 30b03d05e07467b8c6ec683ea96b5bffcbcd3931 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?=
+ <marmarek@invisiblethingslab.com>
+Date: Fri, 26 Jun 2015 03:28:24 +0200
+Subject: xen/gntdevt: Fix race condition in gntdev_release()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?=
+
+commit 30b03d05e07467b8c6ec683ea96b5bffcbcd3931 upstream.
+
+While gntdev_release() is called the MMU notifier is still registered
+and can traverse priv->maps list even if no pages are mapped (which is
+the case -- gntdev_release() is called after all). But
+gntdev_release() will clear that list, so make sure that only one of
+those things happens at the same time.
+
+Signed-off-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
+Signed-off-by: David Vrabel <david.vrabel@citrix.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/xen/gntdev.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/xen/gntdev.c
++++ b/drivers/xen/gntdev.c
+@@ -529,12 +529,14 @@ static int gntdev_release(struct inode *
+
+ pr_debug("priv %p\n", priv);
+
++ mutex_lock(&priv->lock);
+ while (!list_empty(&priv->maps)) {
+ map = list_entry(priv->maps.next, struct grant_map, next);
+ list_del(&map->next);
+ gntdev_put_map(NULL /* already removed */, map);
+ }
+ WARN_ON(!list_empty(&priv->freeable_maps));
++ mutex_unlock(&priv->lock);
+
+ if (use_ptemod)
+ mmu_notifier_unregister(&priv->mn, priv->mm);