--- /dev/null
+From e053f96b1a00022b4e2c7ceb7ac0229646626507 Mon Sep 17 00:00:00 2001
+From: Denis Carikli <denis@eukrea.com>
+Date: Thu, 23 Jul 2015 10:31:12 +0200
+Subject: ARM: dts: i.MX35: Fix can support.
+
+From: Denis Carikli <denis@eukrea.com>
+
+commit e053f96b1a00022b4e2c7ceb7ac0229646626507 upstream.
+
+Since commit 3d42a379b6fa5b46058e3302b1802b29f64865bb
+("can: flexcan: add 2nd clock to support imx53 and newer")
+the CAN driver requires device tree nodes to have a second clock.
+Add it to the i.MX35 CAN nodes to fix probing the flexcan driver on
+the respective platforms.
+
+Signed-off-by: Denis Carikli <denis@eukrea.com>
+Signed-off-by: Shawn Guo <shawnguo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
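+
+A minimal sketch (not part of this patch; names follow the common clk
+API) of why probing fails when the node lacks the second clock: the
+driver looks up both clocks by name and aborts if either is missing.
+
+	#include <linux/clk.h>
+	#include <linux/err.h>
+	#include <linux/platform_device.h>
+
+	static int sketch_get_can_clocks(struct platform_device *pdev)
+	{
+		struct clk *clk_ipg, *clk_per;
+
+		clk_ipg = devm_clk_get(&pdev->dev, "ipg");	/* bus clock */
+		if (IS_ERR(clk_ipg))
+			return PTR_ERR(clk_ipg);
+
+		/* Fails on i.MX35 without this patch: no "per" clock in DT. */
+		clk_per = devm_clk_get(&pdev->dev, "per");
+		if (IS_ERR(clk_per))
+			return PTR_ERR(clk_per);
+
+		return 0;
+	}
+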
+ arch/arm/boot/dts/imx35.dtsi | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/arm/boot/dts/imx35.dtsi
++++ b/arch/arm/boot/dts/imx35.dtsi
+@@ -286,8 +286,8 @@
+ can1: can@53fe4000 {
+ compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
+ reg = <0x53fe4000 0x1000>;
+- clocks = <&clks 33>;
+- clock-names = "ipg";
++ clocks = <&clks 33>, <&clks 33>;
++ clock-names = "ipg", "per";
+ interrupts = <43>;
+ status = "disabled";
+ };
+@@ -295,8 +295,8 @@
+ can2: can@53fe8000 {
+ compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
+ reg = <0x53fe8000 0x1000>;
+- clocks = <&clks 34>;
+- clock-names = "ipg";
++ clocks = <&clks 34>, <&clks 34>;
++ clock-names = "ipg", "per";
+ interrupts = <44>;
+ status = "disabled";
+ };
--- /dev/null
+From 9a258afa928b45e6dd2efcac46ccf7eea705d35a Mon Sep 17 00:00:00 2001
+From: Roger Quadros <rogerq@ti.com>
+Date: Thu, 16 Jul 2015 16:16:44 +0300
+Subject: ARM: OMAP2+: hwmod: Fix _wait_target_ready() for hwmods without sysc
+
+From: Roger Quadros <rogerq@ti.com>
+
+commit 9a258afa928b45e6dd2efcac46ccf7eea705d35a upstream.
+
+For hwmods without sysc, _init_mpu_rt_base(oh) won't be called and so
+_find_mpu_rt_port(oh) will return NULL, thus preventing the ready state
+check on those modules after the module is enabled.
+
+This can potentially cause a bus access error if the module is accessed
+before the module is ready.
+
+Fix this by unconditionally calling _init_mpu_rt_base() during hwmod
+_init(). Do ioremap only if we need SYSC access.
+
+Even though the _wait_target_ready() check doesn't really need the MPU RT
+port but just the PRCM registers, we still mandate that the hwmod must
+have an MPU RT port if the ready state check needs to be done. Else it
+would mean that the module is not accessible by the MPU, so there is no
+point in waiting for the target to be ready.
+
+e.g. this fixes the below DCAN bus access error on AM437x-gp-evm.
+
+[ 16.672978] ------------[ cut here ]------------
+[ 16.677885] WARNING: CPU: 0 PID: 1580 at drivers/bus/omap_l3_noc.c:147 l3_interrupt_handler+0x234/0x35c()
+[ 16.687946] 44000000.ocp:L3 Custom Error: MASTER M2 (64-bit) TARGET L4_PER_0 (Read): Data Access in User mode during Functional access
+[ 16.700654] Modules linked in: xhci_hcd btwilink ti_vpfe dwc3 videobuf2_core ov2659 bluetooth v4l2_common videodev ti_am335x_adc kfifo_buf industrialio c_can_platform videobuf2_dma_contig media snd_soc_tlv320aic3x pixcir_i2c_ts c_can dc
+[ 16.731144] CPU: 0 PID: 1580 Comm: rpc.statd Not tainted 3.14.26-02561-gf733aa036398 #180
+[ 16.739747] Backtrace:
+[ 16.742336] [<c0011108>] (dump_backtrace) from [<c00112a4>] (show_stack+0x18/0x1c)
+[ 16.750285] r6:00000093 r5:00000009 r4:eab5b8a8 r3:00000000
+[ 16.756252] [<c001128c>] (show_stack) from [<c05a4418>] (dump_stack+0x20/0x28)
+[ 16.763870] [<c05a43f8>] (dump_stack) from [<c0037120>] (warn_slowpath_common+0x6c/0x8c)
+[ 16.772408] [<c00370b4>] (warn_slowpath_common) from [<c00371e4>] (warn_slowpath_fmt+0x38/0x40)
+[ 16.781550] r8:c05d1f90 r7:c0730844 r6:c0730448 r5:80080003 r4:ed0cd210
+[ 16.788626] [<c00371b0>] (warn_slowpath_fmt) from [<c027fa94>] (l3_interrupt_handler+0x234/0x35c)
+[ 16.797968] r3:ed0cd480 r2:c0730508
+[ 16.801747] [<c027f860>] (l3_interrupt_handler) from [<c0063758>] (handle_irq_event_percpu+0x54/0x1bc)
+[ 16.811533] r10:ed005600 r9:c084855b r8:0000002a r7:00000000 r6:00000000 r5:0000002a
+[ 16.819780] r4:ed0e6d80
+[ 16.822453] [<c0063704>] (handle_irq_event_percpu) from [<c00638f0>] (handle_irq_event+0x30/0x40)
+[ 16.831789] r10:eb2b6938 r9:eb2b6960 r8:bf011420 r7:fa240100 r6:00000000 r5:0000002a
+[ 16.840052] r4:ed005600
+[ 16.842744] [<c00638c0>] (handle_irq_event) from [<c00661d8>] (handle_fasteoi_irq+0x74/0x128)
+[ 16.851702] r4:ed005600 r3:00000000
+[ 16.855479] [<c0066164>] (handle_fasteoi_irq) from [<c0063068>] (generic_handle_irq+0x28/0x38)
+[ 16.864523] r4:0000002a r3:c0066164
+[ 16.868294] [<c0063040>] (generic_handle_irq) from [<c000ef60>] (handle_IRQ+0x38/0x8c)
+[ 16.876612] r4:c081c640 r3:00000202
+[ 16.880380] [<c000ef28>] (handle_IRQ) from [<c00084f0>] (gic_handle_irq+0x30/0x5c)
+[ 16.888328] r6:eab5ba38 r5:c0804460 r4:fa24010c r3:00000100
+[ 16.894303] [<c00084c0>] (gic_handle_irq) from [<c05a8d80>] (__irq_svc+0x40/0x50)
+[ 16.902193] Exception stack(0xeab5ba38 to 0xeab5ba80)
+[ 16.907499] ba20: 00000000 00000006
+[ 16.916108] ba40: fa1d0000 fa1d0008 ed3d3000 eab5bab4 ed3d3460 c0842af4 bf011420 eb2b6960
+[ 16.924716] ba60: eb2b6938 eab5ba8c eab5ba90 eab5ba80 bf035220 bf07702c 600f0013 ffffffff
+[ 16.933317] r7:eab5ba6c r6:ffffffff r5:600f0013 r4:bf07702c
+[ 16.939317] [<bf077000>] (c_can_plat_read_reg_aligned_to_16bit [c_can_platform]) from [<bf035220>] (c_can_get_berr_counter+0x38/0x64 [c_can])
+[ 16.952696] [<bf0351e8>] (c_can_get_berr_counter [c_can]) from [<bf010294>] (can_fill_info+0x124/0x15c [can_dev])
+[ 16.963480] r5:ec8c9740 r4:ed3d3000
+[ 16.967253] [<bf010170>] (can_fill_info [can_dev]) from [<c0502fa8>] (rtnl_fill_ifinfo+0x58c/0x8fc)
+[ 16.976749] r6:ec8c9740 r5:ed3d3000 r4:eb2b6780
+[ 16.981613] [<c0502a1c>] (rtnl_fill_ifinfo) from [<c0503408>] (rtnl_dump_ifinfo+0xf0/0x1dc)
+[ 16.990401] r10:ec8c9740 r9:00000000 r8:00000000 r7:00000000 r6:ebd4d1b4 r5:ed3d3000
+[ 16.998671] r4:00000000
+[ 17.001342] [<c0503318>] (rtnl_dump_ifinfo) from [<c050e6e4>] (netlink_dump+0xa8/0x1e0)
+[ 17.009772] r10:00000000 r9:00000000 r8:c0503318 r7:ebf3e6c0 r6:ebd4d1b4 r5:ec8c9740
+[ 17.018050] r4:ebd4d000
+[ 17.020714] [<c050e63c>] (netlink_dump) from [<c050ec10>] (__netlink_dump_start+0x104/0x154)
+[ 17.029591] r6:eab5bd34 r5:ec8c9980 r4:ebd4d000
+[ 17.034454] [<c050eb0c>] (__netlink_dump_start) from [<c0505604>] (rtnetlink_rcv_msg+0x110/0x1f4)
+[ 17.043778] r7:00000000 r6:ec8c9980 r5:00000f40 r4:ebf3e6c0
+[ 17.049743] [<c05054f4>] (rtnetlink_rcv_msg) from [<c05108e8>] (netlink_rcv_skb+0xb4/0xc8)
+[ 17.058449] r8:eab5bdac r7:ec8c9980 r6:c05054f4 r5:ec8c9980 r4:ebf3e6c0
+[ 17.065534] [<c0510834>] (netlink_rcv_skb) from [<c0504134>] (rtnetlink_rcv+0x24/0x2c)
+[ 17.073854] r6:ebd4d000 r5:00000014 r4:ec8c9980 r3:c0504110
+[ 17.079846] [<c0504110>] (rtnetlink_rcv) from [<c05102ac>] (netlink_unicast+0x180/0x1ec)
+[ 17.088363] r4:ed0c6800 r3:c0504110
+[ 17.092113] [<c051012c>] (netlink_unicast) from [<c0510670>] (netlink_sendmsg+0x2ac/0x380)
+[ 17.100813] r10:00000000 r8:00000008 r7:ec8c9980 r6:ebd4d000 r5:eab5be70 r4:eab5bee4
+[ 17.109083] [<c05103c4>] (netlink_sendmsg) from [<c04dfdb4>] (sock_sendmsg+0x90/0xb0)
+[ 17.117305] r10:00000000 r9:eab5a000 r8:becdda3c r7:0000000c r6:ea978400 r5:eab5be70
+[ 17.125563] r4:c05103c4
+[ 17.128225] [<c04dfd24>] (sock_sendmsg) from [<c04e1c28>] (SyS_sendto+0xb8/0xdc)
+[ 17.136001] r6:becdda5c r5:00000014 r4:ecd37040
+[ 17.140876] [<c04e1b70>] (SyS_sendto) from [<c000e680>] (ret_fast_syscall+0x0/0x30)
+[ 17.148923] r10:00000000 r8:c000e804 r7:00000122 r6:becdda5c r5:0000000c r4:becdda5c
+[ 17.157169] ---[ end trace 2b71e15b38f58bad ]---
+
+Fixes: 6423d6df1440 ("ARM: OMAP2+: hwmod: check for module address space during init")
+Signed-off-by: Roger Quadros <rogerq@ti.com>
+Signed-off-by: Paul Walmsley <paul@pwsan.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
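+
+A condensed sketch of the control flow this patch establishes in
+_init_mpu_rt_base() (simplified from the hunks below, not verbatim):
+
+	static int __init _init_mpu_rt_base_sketch(struct omap_hwmod *oh)
+	{
+		_save_mpu_port_index(oh);
+
+		/* No SYSC registers: nothing to ioremap, and a missing
+		 * MPU port is not an error. */
+		if (!oh->class->sysc)
+			return 0;
+
+		/* SYSC access does require an MPU register target port. */
+		if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
+			return -ENXIO;
+
+		/* ... ioremap the register target address space ... */
+		return 0;
+	}
+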
+ arch/arm/mach-omap2/omap_hwmod.c | 24 ++++++++++++++++--------
+ 1 file changed, 16 insertions(+), 8 deletions(-)
+
+--- a/arch/arm/mach-omap2/omap_hwmod.c
++++ b/arch/arm/mach-omap2/omap_hwmod.c
+@@ -2373,6 +2373,9 @@ static int of_dev_hwmod_lookup(struct de
+ * registers. This address is needed early so the OCP registers that
+ * are part of the device's address space can be ioremapped properly.
+ *
++ * If SYSC access is not needed, the registers will not be remapped
++ * and non-availability of MPU access is not treated as an error.
++ *
+ * Returns 0 on success, -EINVAL if an invalid hwmod is passed, and
+ * -ENXIO on absent or invalid register target address space.
+ */
+@@ -2387,6 +2390,11 @@ static int __init _init_mpu_rt_base(stru
+
+ _save_mpu_port_index(oh);
+
++ /* if we don't need sysc access we don't need to ioremap */
++ if (!oh->class->sysc)
++ return 0;
++
++ /* we can't continue without MPU PORT if we need sysc access */
+ if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
+ return -ENXIO;
+
+@@ -2396,8 +2404,10 @@ static int __init _init_mpu_rt_base(stru
+ oh->name);
+
+ /* Extract the IO space from device tree blob */
+- if (!np)
++ if (!np) {
++ pr_err("omap_hwmod: %s: no dt node\n", oh->name);
+ return -ENXIO;
++ }
+
+ va_start = of_iomap(np, index + oh->mpu_rt_idx);
+ } else {
+@@ -2456,13 +2466,11 @@ static int __init _init(struct omap_hwmo
+ oh->name, np->name);
+ }
+
+- if (oh->class->sysc) {
+- r = _init_mpu_rt_base(oh, NULL, index, np);
+- if (r < 0) {
+- WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
+- oh->name);
+- return 0;
+- }
++ r = _init_mpu_rt_base(oh, NULL, index, np);
++ if (r < 0) {
++ WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
++ oh->name);
++ return 0;
+ }
+
+ r = _init_clocks(oh, NULL);
--- /dev/null
+From a798c24a69b64f09e2d323ac8155a36373e5d5fd Mon Sep 17 00:00:00 2001
+From: Lars-Peter Clausen <lars@metafoo.de>
+Date: Tue, 21 Jul 2015 11:51:35 +0200
+Subject: ASoC: dapm: Don't add prefix to widget stream name
+
+From: Lars-Peter Clausen <lars@metafoo.de>
+
+commit a798c24a69b64f09e2d323ac8155a36373e5d5fd upstream.
+
+Commit fdb6eb0a1287 ("ASoC: dapm: Modify widget stream name according to
+prefix") fixed the case where a DAPM route between a DAI widget and a
+DAC/ADC/AIF widget with a matching stream name was not created when the
+DAPM context was using a prefix.
+
+Unfortunately the patch introduced a few issues on its own like leaking the
+dynamically allocated stream name memory and also not checking whether the
+allocation succeeded in the first place.
+
+It is also incomplete in that it still does not handle the case where the
+stream name of the widget is a substring of the stream name of the DAI,
+which is explicitly allowed and works fine if no DAPM prefix is used.
+
+Revert the commit and take a slightly different approach to solving the
+issue. Instead of comparing the widget's stream name to the name of the DAI
+widget, compare it to the stream name of the DAI widget. The stream name of
+the DAI widget is identical to the name of the DAI widget except that it
+won't have the DAPM prefix added. So this approach behaves identically
+regardless of whether the DAPM context uses a prefix or not.
+
+We don't have to worry about potentially matching with a widget with the
+same stream name, but from a different DAPM context with a different
+prefix, since the code already makes sure that both the DAI widget and the
+matched widget are from the same DAPM context.
+
+Fixes: fdb6eb0a1287 ("ASoC: dapm: Modify widget stream name according to prefix")
+Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
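+
+A self-contained illustration (hypothetical widget names) of why
+matching against the DAI widget's stream name is prefix-proof while
+matching against its widget name is not:
+
+	#include <stdio.h>
+	#include <string.h>
+
+	int main(void)
+	{
+		const char *dai_name  = "Prefix Playback"; /* prefix added */
+		const char *dai_sname = "Playback";        /* never prefixed */
+		const char *w_sname   = "HiFi Playback";   /* widget stream */
+
+		/* old check: the prefixed name is no longer a substring */
+		printf("vs name : %d\n", strstr(w_sname, dai_name) != NULL);
+		/* new check: matches with or without a DAPM prefix */
+		printf("vs sname: %d\n", strstr(w_sname, dai_sname) != NULL);
+		return 0;
+	}
+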
+ sound/soc/soc-dapm.c | 12 +++---------
+ 1 file changed, 3 insertions(+), 9 deletions(-)
+
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -3109,16 +3109,10 @@ snd_soc_dapm_new_control(struct snd_soc_
+ }
+
+ prefix = soc_dapm_prefix(dapm);
+- if (prefix) {
++ if (prefix)
+ w->name = kasprintf(GFP_KERNEL, "%s %s", prefix, widget->name);
+- if (widget->sname)
+- w->sname = kasprintf(GFP_KERNEL, "%s %s", prefix,
+- widget->sname);
+- } else {
++ else
+ w->name = kasprintf(GFP_KERNEL, "%s", widget->name);
+- if (widget->sname)
+- w->sname = kasprintf(GFP_KERNEL, "%s", widget->sname);
+- }
+ if (w->name == NULL) {
+ kfree(w);
+ return NULL;
+@@ -3566,7 +3560,7 @@ int snd_soc_dapm_link_dai_widgets(struct
+ break;
+ }
+
+- if (!w->sname || !strstr(w->sname, dai_w->name))
++ if (!w->sname || !strstr(w->sname, dai_w->sname))
+ continue;
+
+ if (dai_w->id == snd_soc_dapm_dai_in) {
--- /dev/null
+From e50b1e06b79e9d51efbff9627b4dd407184ef43f Mon Sep 17 00:00:00 2001
+From: Lars-Peter Clausen <lars@metafoo.de>
+Date: Mon, 6 Jul 2015 17:01:24 +0200
+Subject: ASoC: dapm: Lock during userspace access
+
+From: Lars-Peter Clausen <lars@metafoo.de>
+
+commit e50b1e06b79e9d51efbff9627b4dd407184ef43f upstream.
+
+The DAPM lock must be held when accessing the DAPM graph status through
+sysfs or debugfs, otherwise concurrent changes to the graph can result in
+undefined behaviour.
+
+Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/soc/soc-dapm.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -1811,6 +1811,7 @@ static ssize_t dapm_widget_power_read_fi
+ size_t count, loff_t *ppos)
+ {
+ struct snd_soc_dapm_widget *w = file->private_data;
++ struct snd_soc_card *card = w->dapm->card;
+ char *buf;
+ int in, out;
+ ssize_t ret;
+@@ -1820,6 +1821,8 @@ static ssize_t dapm_widget_power_read_fi
+ if (!buf)
+ return -ENOMEM;
+
++ mutex_lock(&card->dapm_mutex);
++
+ /* Supply widgets are not handled by is_connected_{input,output}_ep() */
+ if (w->is_supply) {
+ in = 0;
+@@ -1866,6 +1869,8 @@ static ssize_t dapm_widget_power_read_fi
+ p->sink->name);
+ }
+
++ mutex_unlock(&card->dapm_mutex);
++
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+
+ kfree(buf);
+@@ -2140,11 +2145,15 @@ static ssize_t dapm_widget_show(struct d
+ struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev);
+ int i, count = 0;
+
++ mutex_lock(&rtd->card->dapm_mutex);
++
+ for (i = 0; i < rtd->num_codecs; i++) {
+ struct snd_soc_codec *codec = rtd->codec_dais[i]->codec;
+ count += dapm_widget_show_codec(codec, buf + count);
+ }
+
++ mutex_unlock(&rtd->card->dapm_mutex);
++
+ return count;
+ }
+
--- /dev/null
+From 412efa73dcd3bd03c1838c91e094533a95529039 Mon Sep 17 00:00:00 2001
+From: Shilpa Sreeramalu <shilpa.sreeramalu@intel.com>
+Date: Wed, 15 Jul 2015 07:58:09 -0700
+Subject: ASoC: Intel: Get correct usage_count value to load firmware
+
+From: Shilpa Sreeramalu <shilpa.sreeramalu@intel.com>
+
+commit 412efa73dcd3bd03c1838c91e094533a95529039 upstream.
+
+The usage_count variable was read before it was set to the correct
+value, due to which the firmware load was failing. Because of this,
+IPC messages sent to the firmware were timing out, causing a delay
+of about 1 second while playing audio from the internal speakers.
+
+With this patch, usage_count is read after the call to
+pm_runtime_get_sync(), which increments the usage_count variable;
+the firmware load then succeeds and all the IPC messages are
+processed correctly.
+
+Signed-off-by: Shilpa Sreeramalu <shilpa.sreeramalu@intel.com>
+Signed-off-by: Fang, Yang A <yang.a.fang@intel.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
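+
+The ordering at the heart of the fix, as a sketch (simplified; in the
+driver the read is wrapped in a CONFIG_PM conditional):
+
+	/* before: reads the pre-increment value, so the "first user"
+	 * logic misfires and the firmware is not loaded */
+	usage_count = atomic_read(&dev->power.usage_count);
+	ret = pm_runtime_get_sync(dev);	/* increments usage_count */
+
+	/* after: the count is read once it already reflects this user */
+	ret = pm_runtime_get_sync(dev);
+	usage_count = atomic_read(&dev->power.usage_count);
+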
+ sound/soc/intel/atom/sst/sst_drv_interface.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/sound/soc/intel/atom/sst/sst_drv_interface.c
++++ b/sound/soc/intel/atom/sst/sst_drv_interface.c
+@@ -42,6 +42,11 @@
+ #define MIN_FRAGMENT_SIZE (50 * 1024)
+ #define MAX_FRAGMENT_SIZE (1024 * 1024)
+ #define SST_GET_BYTES_PER_SAMPLE(pcm_wd_sz) (((pcm_wd_sz + 15) >> 4) << 1)
++#ifdef CONFIG_PM
++#define GET_USAGE_COUNT(dev) (atomic_read(&dev->power.usage_count))
++#else
++#define GET_USAGE_COUNT(dev) 1
++#endif
+
+ int free_stream_context(struct intel_sst_drv *ctx, unsigned int str_id)
+ {
+@@ -141,15 +146,9 @@ static int sst_power_control(struct devi
+ int ret = 0;
+ int usage_count = 0;
+
+-#ifdef CONFIG_PM
+- usage_count = atomic_read(&dev->power.usage_count);
+-#else
+- usage_count = 1;
+-#endif
+-
+ if (state == true) {
+ ret = pm_runtime_get_sync(dev);
+-
++ usage_count = GET_USAGE_COUNT(dev);
+ dev_dbg(ctx->dev, "Enable: pm usage count: %d\n", usage_count);
+ if (ret < 0) {
+ dev_err(ctx->dev, "Runtime get failed with err: %d\n", ret);
+@@ -164,6 +163,7 @@ static int sst_power_control(struct devi
+ }
+ }
+ } else {
++ usage_count = GET_USAGE_COUNT(dev);
+ dev_dbg(ctx->dev, "Disable: pm usage count: %d\n", usage_count);
+ return sst_pm_runtime_put(ctx);
+ }
--- /dev/null
+From fa8173a3ef0570affde7da352de202190b3786c2 Mon Sep 17 00:00:00 2001
+From: Axel Lin <axel.lin@ingics.com>
+Date: Thu, 23 Jul 2015 23:22:26 +0800
+Subject: ASoC: pcm1681: Fix setting de-emphasis sampling rate selection
+
+From: Axel Lin <axel.lin@ingics.com>
+
+commit fa8173a3ef0570affde7da352de202190b3786c2 upstream.
+
+The de-emphasis sampling rate selection is controlled by bits [3:4] of
+the PCM1681_DEEMPH_CONTROL register. Do a proper left shift to set it.
+
+Signed-off-by: Axel Lin <axel.lin@ingics.com>
+Acked-by: Marek Belisko <marek.belisko@streamunlimited.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
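+
+Worked example of the masking bug (the mask value is assumed to be
+0x18 here, i.e. the two bits of the rate-select field):
+
+	unsigned int mask = 0x18, val = 0x3;	/* rate select = 3 */
+
+	/* old: val applied unshifted, (0x03 & 0x18) == 0x00, field cleared */
+	/* new: (val << 3) == 0x18, value lands in the field as intended */
+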
+ sound/soc/codecs/pcm1681.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/sound/soc/codecs/pcm1681.c
++++ b/sound/soc/codecs/pcm1681.c
+@@ -102,7 +102,7 @@ static int pcm1681_set_deemph(struct snd
+
+ if (val != -1) {
+ regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL,
+- PCM1681_DEEMPH_RATE_MASK, val);
++ PCM1681_DEEMPH_RATE_MASK, val << 3);
+ enable = 1;
+ } else
+ enable = 0;
--- /dev/null
+From a6c2a32ac83567f15e9af3dcbc73148ce68b2ced Mon Sep 17 00:00:00 2001
+From: Ben Zhang <benzh@chromium.org>
+Date: Tue, 21 Jul 2015 14:46:26 -0700
+Subject: ASoC: ssm4567: Keep TDM_BCLKS in ssm4567_set_dai_fmt
+
+From: Ben Zhang <benzh@chromium.org>
+
+commit a6c2a32ac83567f15e9af3dcbc73148ce68b2ced upstream.
+
+The regmap_write in ssm4567_set_dai_fmt accidentally clears the
+TDM_BCLKS field which was set earlier by ssm4567_set_tdm_slot.
+
+This patch fixes it by using regmap_update_bits with proper mask.
+
+Signed-off-by: Ben Zhang <benzh@chromium.org>
+Acked-by: Lars-Peter Clausen <lars@metafoo.de>
+Acked-by: Anatol Pomozov <anatol.pomozov@gmail.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
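+
+The semantic difference, sketched: regmap_write() replaces the whole
+register, while regmap_update_bits(map, reg, mask, val) only rewrites
+the masked bits, i.e. new = (old & ~mask) | (val & mask). With the
+TDM_BCLKS field held in bits outside the mask (layout assumed here):
+
+	unsigned int old = 0x47;		/* TDM_BCLKS bits set */
+	unsigned int mask = 0x0f, val = 0x05;
+	unsigned int new = (old & ~mask) | (val & mask);	/* 0x45 */
+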
+ sound/soc/codecs/ssm4567.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/sound/soc/codecs/ssm4567.c
++++ b/sound/soc/codecs/ssm4567.c
+@@ -315,7 +315,13 @@ static int ssm4567_set_dai_fmt(struct sn
+ if (invert_fclk)
+ ctrl1 |= SSM4567_SAI_CTRL_1_FSYNC;
+
+- return regmap_write(ssm4567->regmap, SSM4567_REG_SAI_CTRL_1, ctrl1);
++ return regmap_update_bits(ssm4567->regmap, SSM4567_REG_SAI_CTRL_1,
++ SSM4567_SAI_CTRL_1_BCLK |
++ SSM4567_SAI_CTRL_1_FSYNC |
++ SSM4567_SAI_CTRL_1_LJ |
++ SSM4567_SAI_CTRL_1_TDM |
++ SSM4567_SAI_CTRL_1_PDM,
++ ctrl1);
+ }
+
+ static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable)
--- /dev/null
+From f898c522f0e9ac9f3177d0762b76e2ab2d2cf9c0 Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Wed, 22 Jul 2015 18:05:35 +0800
+Subject: crypto: ixp4xx - Remove bogus BUG_ON on scattered dst buffer
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit f898c522f0e9ac9f3177d0762b76e2ab2d2cf9c0 upstream.
+
+This patch removes a bogus BUG_ON in the ablkcipher path that
+triggers when the destination buffer is different from the source
+buffer and is scattered.
+
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/ixp4xx_crypto.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/crypto/ixp4xx_crypto.c
++++ b/drivers/crypto/ixp4xx_crypto.c
+@@ -904,7 +904,6 @@ static int ablk_perform(struct ablkciphe
+ crypt->mode |= NPE_OP_NOT_IN_PLACE;
+ /* This was never tested by Intel
+ * for more than one dst buffer, I think. */
+- BUG_ON(req->dst->length < nbytes);
+ req_ctx->dst = NULL;
+ if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
+ flags, DMA_FROM_DEVICE))
--- /dev/null
+From 6f043b50da8e03bdcc5703fd37ea45bc6892432f Mon Sep 17 00:00:00 2001
+From: Tadeusz Struk <tadeusz.struk@intel.com>
+Date: Tue, 21 Jul 2015 22:07:47 -0700
+Subject: crypto: qat - Fix invalid synchronization between register/unregister sym algs
+
+From: Tadeusz Struk <tadeusz.struk@intel.com>
+
+commit 6f043b50da8e03bdcc5703fd37ea45bc6892432f upstream.
+
+The synchronization method, which used an atomic, was bogus.
+Use proper synchronization with a mutex.
+
+Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
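+
+The resulting pattern, sketched (do_register() is a hypothetical
+stand-in): the counter update and the (un)registration now happen
+under one lock, so two devices probing or removing concurrently can
+never both see themselves as the first or last user.
+
+	static DEFINE_MUTEX(lock);
+	static unsigned int active;
+
+	int sketch_register(void)
+	{
+		int ret = 0;
+
+		mutex_lock(&lock);
+		if (++active == 1)		/* first device only */
+			ret = do_register();
+		mutex_unlock(&lock);
+		return ret;
+	}
+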
+ drivers/crypto/qat/qat_common/qat_algs.c | 24 ++++++++++++++++--------
+ 1 file changed, 16 insertions(+), 8 deletions(-)
+
+--- a/drivers/crypto/qat/qat_common/qat_algs.c
++++ b/drivers/crypto/qat/qat_common/qat_algs.c
+@@ -73,7 +73,8 @@
+ ICP_QAT_HW_CIPHER_KEY_CONVERT, \
+ ICP_QAT_HW_CIPHER_DECRYPT)
+
+-static atomic_t active_dev;
++static DEFINE_MUTEX(algs_lock);
++static unsigned int active_devs;
+
+ struct qat_alg_buf {
+ uint32_t len;
+@@ -1271,7 +1272,10 @@ static struct crypto_alg qat_algs[] = {
+
+ int qat_algs_register(void)
+ {
+- if (atomic_add_return(1, &active_dev) == 1) {
++ int ret = 0;
++
++ mutex_lock(&algs_lock);
++ if (++active_devs == 1) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
+@@ -1280,21 +1284,25 @@ int qat_algs_register(void)
+ CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
+ CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
+
+- return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
++ ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
+ }
+- return 0;
++ mutex_unlock(&algs_lock);
++ return ret;
+ }
+
+ int qat_algs_unregister(void)
+ {
+- if (atomic_sub_return(1, &active_dev) == 0)
+- return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+- return 0;
++ int ret = 0;
++
++ mutex_lock(&algs_lock);
++ if (--active_devs == 0)
++ ret = crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
++ mutex_unlock(&algs_lock);
++ return ret;
+ }
+
+ int qat_algs_init(void)
+ {
+- atomic_set(&active_dev, 0);
+ crypto_get_default_rng();
+ return 0;
+ }
--- /dev/null
+From 17fb874dee093139923af8ed36061faa92cc8e79 Mon Sep 17 00:00:00 2001
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Date: Fri, 24 Jul 2015 13:13:30 +0200
+Subject: hwrng: core - correct error check of kthread_run call
+
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+
+commit 17fb874dee093139923af8ed36061faa92cc8e79 upstream.
+
+The kthread_run() function can return two different error values
+but the hwrng core only checks for -ENOMEM. If the other error
+value, -EINTR, is returned, it is assigned to hwrng_fill and later
+used in a kthread_stop() call, which naturally crashes.
+
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
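+
+Illustration: kthread_run() returns an ERR_PTR on any failure, so the
+check must use IS_ERR() rather than compare against a single error
+value (the pr_err() text here is illustrative):
+
+	struct task_struct *t = kthread_run(fn, NULL, "worker");
+
+	if (IS_ERR(t)) {	/* catches -ENOMEM and -EINTR alike */
+		pr_err("thread creation failed: %ld\n", PTR_ERR(t));
+		t = NULL;	/* never pass an ERR_PTR to kthread_stop() */
+	}
+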
+ drivers/char/hw_random/core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/char/hw_random/core.c
++++ b/drivers/char/hw_random/core.c
+@@ -429,7 +429,7 @@ static int hwrng_fillfn(void *unused)
+ static void start_khwrngd(void)
+ {
+ hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
+- if (hwrng_fill == ERR_PTR(-ENOMEM)) {
++ if (IS_ERR(hwrng_fill)) {
+ pr_err("hwrng_fill thread creation failed");
+ hwrng_fill = NULL;
+ }
--- /dev/null
+From 2761713d35e370fd640b5781109f753066b746c4 Mon Sep 17 00:00:00 2001
+From: Ilya Dryomov <idryomov@gmail.com>
+Date: Thu, 16 Jul 2015 17:36:11 +0300
+Subject: rbd: fix copyup completion race
+
+From: Ilya Dryomov <idryomov@gmail.com>
+
+commit 2761713d35e370fd640b5781109f753066b746c4 upstream.
+
+For write/discard obj_requests that involved a copyup method call, the
+opcode of the first op is CEPH_OSD_OP_CALL and the ->callback is
+rbd_img_obj_copyup_callback(). The latter frees copyup pages, sets
+->xferred and delegates to rbd_img_obj_callback(), the "normal" image
+object callback, for reporting to the block layer and putting refs.
+
+rbd_osd_req_callback() however treats CEPH_OSD_OP_CALL as a trivial op,
+which means obj_request is marked done in rbd_osd_trivial_callback(),
+*before* ->callback is invoked and rbd_img_obj_copyup_callback() has
+a chance to run. Marking obj_request done essentially means giving
+rbd_img_obj_callback() a license to end it at any moment, so if another
+obj_request from the same img_request is being completed concurrently,
+rbd_img_obj_end_request() may very well be called on such a prematurely
+marked done request:
+
+<obj_request-1/2 reply>
+handle_reply()
+ rbd_osd_req_callback()
+ rbd_osd_trivial_callback()
+ rbd_obj_request_complete()
+ rbd_img_obj_copyup_callback()
+ rbd_img_obj_callback()
+ <obj_request-2/2 reply>
+ handle_reply()
+ rbd_osd_req_callback()
+ rbd_osd_trivial_callback()
+ for_each_obj_request(obj_request->img_request) {
+ rbd_img_obj_end_request(obj_request-1/2)
+ rbd_img_obj_end_request(obj_request-2/2) <--
+ }
+
+Calling rbd_img_obj_end_request() on such a request leads to trouble,
+in particular because its ->xferred is 0. We report 0 to the block
+layer with blk_update_request(), get back 1 for "this request has more
+data in flight" and then trip on
+
+ rbd_assert(more ^ (which == img_request->obj_request_count));
+
+with rhs (which == ...) being 1 because rbd_img_obj_end_request() has
+been called for both requests and lhs (more) being 1 because we haven't
+got a chance to set ->xferred in rbd_img_obj_copyup_callback() yet.
+
+To fix this, leverage that rbd wants to call class methods in only two
+cases: one is a generic method call wrapper (obj_request is standalone)
+and the other is a copyup (obj_request is part of an img_request). So
+make a dedicated handler for CEPH_OSD_OP_CALL and directly invoke
+rbd_img_obj_copyup_callback() from it if obj_request is part of an
+img_request, similar to how CEPH_OSD_OP_READ handler invokes
+rbd_img_obj_request_read_callback().
+
+Since rbd_img_obj_copyup_callback() is now being called from the OSD
+request callback (only), it is renamed to rbd_osd_copyup_callback().
+
+Cc: Alex Elder <elder@linaro.org>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Reviewed-by: Alex Elder <elder@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/block/rbd.c | 22 +++++++++++++++++-----
+ 1 file changed, 17 insertions(+), 5 deletions(-)
+
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -522,6 +522,7 @@ void rbd_warn(struct rbd_device *rbd_dev
+ # define rbd_assert(expr) ((void) 0)
+ #endif /* !RBD_DEBUG */
+
++static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
+ static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
+ static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
+ static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
+@@ -1797,6 +1798,16 @@ static void rbd_osd_stat_callback(struct
+ obj_request_done_set(obj_request);
+ }
+
++static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
++{
++ dout("%s: obj %p\n", __func__, obj_request);
++
++ if (obj_request_img_data_test(obj_request))
++ rbd_osd_copyup_callback(obj_request);
++ else
++ obj_request_done_set(obj_request);
++}
++
+ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
+ struct ceph_msg *msg)
+ {
+@@ -1845,6 +1856,8 @@ static void rbd_osd_req_callback(struct
+ rbd_osd_discard_callback(obj_request);
+ break;
+ case CEPH_OSD_OP_CALL:
++ rbd_osd_call_callback(obj_request);
++ break;
+ case CEPH_OSD_OP_NOTIFY_ACK:
+ case CEPH_OSD_OP_WATCH:
+ rbd_osd_trivial_callback(obj_request);
+@@ -2509,13 +2522,15 @@ out_unwind:
+ }
+
+ static void
+-rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
++rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
+ {
+ struct rbd_img_request *img_request;
+ struct rbd_device *rbd_dev;
+ struct page **pages;
+ u32 page_count;
+
++ dout("%s: obj %p\n", __func__, obj_request);
++
+ rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
+ obj_request->type == OBJ_REQUEST_NODATA);
+ rbd_assert(obj_request_img_data_test(obj_request));
+@@ -2542,9 +2557,7 @@ rbd_img_obj_copyup_callback(struct rbd_o
+ if (!obj_request->result)
+ obj_request->xferred = obj_request->length;
+
+- /* Finish up with the normal image object callback */
+-
+- rbd_img_obj_callback(obj_request);
++ obj_request_done_set(obj_request);
+ }
+
+ static void
+@@ -2629,7 +2642,6 @@ rbd_img_obj_parent_read_full_callback(st
+
+ /* All set, send it off. */
+
+- orig_request->callback = rbd_img_obj_copyup_callback;
+ osdc = &rbd_dev->rbd_client->client->osdc;
+ img_result = rbd_obj_request_submit(osdc, orig_request);
+ if (!img_result)
sparc64-fix-userspace-fpu-register-corruptions.patch
clk-keystone-add-support-for-post-divider-register-for-main-pll.patch
arm-dts-keystone-fix-dt-bindings-to-use-post-div-register.patch
+asoc-intel-get-correct-usage_count-value-to-load-firmware.patch
+asoc-ssm4567-keep-tdm_bclks-in-ssm4567_set_dai_fmt.patch
+asoc-pcm1681-fix-setting-de-emphasis-sampling-rate-selection.patch
+asoc-dapm-lock-during-userspace-access.patch
+asoc-dapm-don-t-add-prefix-to-widget-stream-name.patch
+x86-xen-probe-target-addresses-in-set_aliased_prot-before-the-hypercall.patch
+xen-gntdevt-fix-race-condition-in-gntdev_release.patch
+xen-events-fifo-handle-linked-events-when-closing-a-port.patch
+x86-ldt-make-modify_ldt-synchronous.patch
+hwrng-core-correct-error-check-of-kthread_run-call.patch
+crypto-qat-fix-invalid-synchronization-between-register-unregister-sym-algs.patch
+crypto-ixp4xx-remove-bogus-bug_on-on-scattered-dst-buffer.patch
+rbd-fix-copyup-completion-race.patch
+arm-dts-i.mx35-fix-can-support.patch
+arm-omap2-hwmod-fix-_wait_target_ready-for-hwmods-without-sysc.patch
--- /dev/null
+From 37868fe113ff2ba814b3b4eb12df214df555f8dc Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@kernel.org>
+Date: Thu, 30 Jul 2015 14:31:32 -0700
+Subject: x86/ldt: Make modify_ldt synchronous
+
+From: Andy Lutomirski <luto@kernel.org>
+
+commit 37868fe113ff2ba814b3b4eb12df214df555f8dc upstream.
+
+modify_ldt() has questionable locking and does not synchronize
+threads. Improve it: redesign the locking and synchronize all
+threads' LDTs using an IPI on all modifications.
+
+This will dramatically slow down modify_ldt in multithreaded
+programs, but there shouldn't be any multithreaded programs that
+care about modify_ldt's performance in the first place.
+
+This fixes some fallout from the CVE-2015-5157 fixes.
+
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Cc: Andrew Cooper <andrew.cooper3@citrix.com>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Jan Beulich <jbeulich@suse.com>
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sasha Levin <sasha.levin@oracle.com>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: security@kernel.org <security@kernel.org>
+Cc: xen-devel <xen-devel@lists.xen.org>
+Link: http://lkml.kernel.org/r/4c6978476782160600471bd865b318db34c7b628.1438291540.git.luto@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
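+
+The publish/consume pattern at the core of the rewrite, condensed from
+the hunks below (sketch, not verbatim):
+
+	/* writer, mm->context.lock held: publish a fully built table */
+	smp_store_release(&mm->context.ldt, new_ldt);
+	/* make every CPU using this mm reload before old_ldt is freed */
+	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);
+
+	/* reader, IRQs off: pairs with the release store above */
+	ldt = lockless_dereference(mm->context.ldt);
+	if (ldt)
+		set_ldt(ldt->entries, ldt->size);
+	else
+		clear_LDT();
+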
+ arch/x86/include/asm/desc.h | 15 --
+ arch/x86/include/asm/mmu.h | 3
+ arch/x86/include/asm/mmu_context.h | 54 ++++++-
+ arch/x86/kernel/cpu/common.c | 4
+ arch/x86/kernel/cpu/perf_event.c | 12 +
+ arch/x86/kernel/ldt.c | 264 ++++++++++++++++++++-----------------
+ arch/x86/kernel/process_64.c | 4
+ arch/x86/kernel/step.c | 6
+ arch/x86/power/cpu.c | 3
+ 9 files changed, 211 insertions(+), 154 deletions(-)
+
+--- a/arch/x86/include/asm/desc.h
++++ b/arch/x86/include/asm/desc.h
+@@ -280,21 +280,6 @@ static inline void clear_LDT(void)
+ set_ldt(NULL, 0);
+ }
+
+-/*
+- * load one particular LDT into the current CPU
+- */
+-static inline void load_LDT_nolock(mm_context_t *pc)
+-{
+- set_ldt(pc->ldt, pc->size);
+-}
+-
+-static inline void load_LDT(mm_context_t *pc)
+-{
+- preempt_disable();
+- load_LDT_nolock(pc);
+- preempt_enable();
+-}
+-
+ static inline unsigned long get_desc_base(const struct desc_struct *desc)
+ {
+ return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
+--- a/arch/x86/include/asm/mmu.h
++++ b/arch/x86/include/asm/mmu.h
+@@ -9,8 +9,7 @@
+ * we put the segment information here.
+ */
+ typedef struct {
+- void *ldt;
+- int size;
++ struct ldt_struct *ldt;
+
+ #ifdef CONFIG_X86_64
+ /* True if mm supports a task running in 32 bit compatibility mode. */
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -34,6 +34,50 @@ static inline void load_mm_cr4(struct mm
+ #endif
+
+ /*
++ * ldt_structs can be allocated, used, and freed, but they are never
++ * modified while live.
++ */
++struct ldt_struct {
++ /*
++ * Xen requires page-aligned LDTs with special permissions. This is
++ * needed to prevent us from installing evil descriptors such as
++ * call gates. On native, we could merge the ldt_struct and LDT
++ * allocations, but it's not worth trying to optimize.
++ */
++ struct desc_struct *entries;
++ int size;
++};
++
++static inline void load_mm_ldt(struct mm_struct *mm)
++{
++ struct ldt_struct *ldt;
++
++ /* lockless_dereference synchronizes with smp_store_release */
++ ldt = lockless_dereference(mm->context.ldt);
++
++ /*
++ * Any change to mm->context.ldt is followed by an IPI to all
++ * CPUs with the mm active. The LDT will not be freed until
++ * after the IPI is handled by all such CPUs. This means that,
++ * if the ldt_struct changes before we return, the values we see
++ * will be safe, and the new values will be loaded before we run
++ * any user code.
++ *
++ * NB: don't try to convert this to use RCU without extreme care.
++ * We would still need IRQs off, because we don't want to change
++ * the local LDT after an IPI loaded a newer value than the one
++ * that we can see.
++ */
++
++ if (unlikely(ldt))
++ set_ldt(ldt->entries, ldt->size);
++ else
++ clear_LDT();
++
++ DEBUG_LOCKS_WARN_ON(preemptible());
++}
++
++/*
+ * Used for LDT copy/destruction.
+ */
+ int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+@@ -78,12 +122,12 @@ static inline void switch_mm(struct mm_s
+ * was called and then modify_ldt changed
+ * prev->context.ldt but suppressed an IPI to this CPU.
+ * In this case, prev->context.ldt != NULL, because we
+- * never free an LDT while the mm still exists. That
+- * means that next->context.ldt != prev->context.ldt,
+- * because mms never share an LDT.
++ * never set context.ldt to NULL while the mm still
++ * exists. That means that next->context.ldt !=
++ * prev->context.ldt, because mms never share an LDT.
+ */
+ if (unlikely(prev->context.ldt != next->context.ldt))
+- load_LDT_nolock(&next->context);
++ load_mm_ldt(next);
+ }
+ #ifdef CONFIG_SMP
+ else {
+@@ -106,7 +150,7 @@ static inline void switch_mm(struct mm_s
+ load_cr3(next->pgd);
+ trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+ load_mm_cr4(next);
+- load_LDT_nolock(&next->context);
++ load_mm_ldt(next);
+ }
+ }
+ #endif
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1434,7 +1434,7 @@ void cpu_init(void)
+ load_sp0(t, &current->thread);
+ set_tss_desc(cpu, t);
+ load_TR_desc();
+- load_LDT(&init_mm.context);
++ load_mm_ldt(&init_mm);
+
+ clear_all_debug_regs();
+ dbg_restore_debug_regs();
+@@ -1483,7 +1483,7 @@ void cpu_init(void)
+ load_sp0(t, thread);
+ set_tss_desc(cpu, t);
+ load_TR_desc();
+- load_LDT(&init_mm.context);
++ load_mm_ldt(&init_mm);
+
+ t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+
+--- a/arch/x86/kernel/cpu/perf_event.c
++++ b/arch/x86/kernel/cpu/perf_event.c
+@@ -2170,21 +2170,25 @@ static unsigned long get_segment_base(un
+ int idx = segment >> 3;
+
+ if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
++ struct ldt_struct *ldt;
++
+ if (idx > LDT_ENTRIES)
+ return 0;
+
+- if (idx > current->active_mm->context.size)
++ /* IRQs are off, so this synchronizes with smp_store_release */
++ ldt = lockless_dereference(current->active_mm->context.ldt);
++ if (!ldt || idx > ldt->size)
+ return 0;
+
+- desc = current->active_mm->context.ldt;
++ desc = &ldt->entries[idx];
+ } else {
+ if (idx > GDT_ENTRIES)
+ return 0;
+
+- desc = raw_cpu_ptr(gdt_page.gdt);
++ desc = raw_cpu_ptr(gdt_page.gdt) + idx;
+ }
+
+- return get_desc_base(desc + idx);
++ return get_desc_base(desc);
+ }
+
+ #ifdef CONFIG_COMPAT
+--- a/arch/x86/kernel/ldt.c
++++ b/arch/x86/kernel/ldt.c
+@@ -12,6 +12,7 @@
+ #include <linux/string.h>
+ #include <linux/mm.h>
+ #include <linux/smp.h>
++#include <linux/slab.h>
+ #include <linux/vmalloc.h>
+ #include <linux/uaccess.h>
+
+@@ -20,82 +21,82 @@
+ #include <asm/mmu_context.h>
+ #include <asm/syscalls.h>
+
+-#ifdef CONFIG_SMP
++/* context.lock is held for us, so we don't need any locking. */
+ static void flush_ldt(void *current_mm)
+ {
+- if (current->active_mm == current_mm)
+- load_LDT(&current->active_mm->context);
++ mm_context_t *pc;
++
++ if (current->active_mm != current_mm)
++ return;
++
++ pc = &current->active_mm->context;
++ set_ldt(pc->ldt->entries, pc->ldt->size);
+ }
+-#endif
+
+-static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
++/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
++static struct ldt_struct *alloc_ldt_struct(int size)
+ {
+- void *oldldt, *newldt;
+- int oldsize;
++ struct ldt_struct *new_ldt;
++ int alloc_size;
+
+- if (mincount <= pc->size)
+- return 0;
+- oldsize = pc->size;
+- mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) &
+- (~(PAGE_SIZE / LDT_ENTRY_SIZE - 1));
+- if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE)
+- newldt = vmalloc(mincount * LDT_ENTRY_SIZE);
+- else
+- newldt = (void *)__get_free_page(GFP_KERNEL);
++ if (size > LDT_ENTRIES)
++ return NULL;
+
+- if (!newldt)
+- return -ENOMEM;
++ new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
++ if (!new_ldt)
++ return NULL;
++
++ BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
++ alloc_size = size * LDT_ENTRY_SIZE;
++
++ /*
++ * Xen is very picky: it requires a page-aligned LDT that has no
++ * trailing nonzero bytes in any page that contains LDT descriptors.
++ * Keep it simple: zero the whole allocation and never allocate less
++ * than PAGE_SIZE.
++ */
++ if (alloc_size > PAGE_SIZE)
++ new_ldt->entries = vzalloc(alloc_size);
++ else
++ new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL);
+
+- if (oldsize)
+- memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE);
+- oldldt = pc->ldt;
+- memset(newldt + oldsize * LDT_ENTRY_SIZE, 0,
+- (mincount - oldsize) * LDT_ENTRY_SIZE);
+-
+- paravirt_alloc_ldt(newldt, mincount);
+-
+-#ifdef CONFIG_X86_64
+- /* CHECKME: Do we really need this ? */
+- wmb();
+-#endif
+- pc->ldt = newldt;
+- wmb();
+- pc->size = mincount;
+- wmb();
+-
+- if (reload) {
+-#ifdef CONFIG_SMP
+- preempt_disable();
+- load_LDT(pc);
+- if (!cpumask_equal(mm_cpumask(current->mm),
+- cpumask_of(smp_processor_id())))
+- smp_call_function(flush_ldt, current->mm, 1);
+- preempt_enable();
+-#else
+- load_LDT(pc);
+-#endif
+- }
+- if (oldsize) {
+- paravirt_free_ldt(oldldt, oldsize);
+- if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
+- vfree(oldldt);
+- else
+- put_page(virt_to_page(oldldt));
++ if (!new_ldt->entries) {
++ kfree(new_ldt);
++ return NULL;
+ }
+- return 0;
++
++ new_ldt->size = size;
++ return new_ldt;
+ }
+
+-static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
++/* After calling this, the LDT is immutable. */
++static void finalize_ldt_struct(struct ldt_struct *ldt)
+ {
+- int err = alloc_ldt(new, old->size, 0);
+- int i;
++ paravirt_alloc_ldt(ldt->entries, ldt->size);
++}
++
++/* context.lock is held */
++static void install_ldt(struct mm_struct *current_mm,
++ struct ldt_struct *ldt)
++{
++ /* Synchronizes with lockless_dereference in load_mm_ldt. */
++ smp_store_release(&current_mm->context.ldt, ldt);
++
++ /* Activate the LDT for all CPUs using current_mm. */
++ on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
++}
+
+- if (err < 0)
+- return err;
++static void free_ldt_struct(struct ldt_struct *ldt)
++{
++ if (likely(!ldt))
++ return;
+
+- for (i = 0; i < old->size; i++)
+- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
+- return 0;
++ paravirt_free_ldt(ldt->entries, ldt->size);
++ if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
++ vfree(ldt->entries);
++ else
++ kfree(ldt->entries);
++ kfree(ldt);
+ }
+
+ /*
+@@ -104,17 +105,37 @@ static inline int copy_ldt(mm_context_t
+ */
+ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+ {
++ struct ldt_struct *new_ldt;
+ struct mm_struct *old_mm;
+ int retval = 0;
+
+ mutex_init(&mm->context.lock);
+- mm->context.size = 0;
+ old_mm = current->mm;
+- if (old_mm && old_mm->context.size > 0) {
+- mutex_lock(&old_mm->context.lock);
+- retval = copy_ldt(&mm->context, &old_mm->context);
+- mutex_unlock(&old_mm->context.lock);
++ if (!old_mm) {
++ mm->context.ldt = NULL;
++ return 0;
+ }
++
++ mutex_lock(&old_mm->context.lock);
++ if (!old_mm->context.ldt) {
++ mm->context.ldt = NULL;
++ goto out_unlock;
++ }
++
++ new_ldt = alloc_ldt_struct(old_mm->context.ldt->size);
++ if (!new_ldt) {
++ retval = -ENOMEM;
++ goto out_unlock;
++ }
++
++ memcpy(new_ldt->entries, old_mm->context.ldt->entries,
++ new_ldt->size * LDT_ENTRY_SIZE);
++ finalize_ldt_struct(new_ldt);
++
++ mm->context.ldt = new_ldt;
++
++out_unlock:
++ mutex_unlock(&old_mm->context.lock);
+ return retval;
+ }
+
+@@ -125,53 +146,47 @@ int init_new_context(struct task_struct
+ */
+ void destroy_context(struct mm_struct *mm)
+ {
+- if (mm->context.size) {
+-#ifdef CONFIG_X86_32
+- /* CHECKME: Can this ever happen ? */
+- if (mm == current->active_mm)
+- clear_LDT();
+-#endif
+- paravirt_free_ldt(mm->context.ldt, mm->context.size);
+- if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
+- vfree(mm->context.ldt);
+- else
+- put_page(virt_to_page(mm->context.ldt));
+- mm->context.size = 0;
+- }
++ free_ldt_struct(mm->context.ldt);
++ mm->context.ldt = NULL;
+ }
+
+ static int read_ldt(void __user *ptr, unsigned long bytecount)
+ {
+- int err;
++ int retval;
+ unsigned long size;
+ struct mm_struct *mm = current->mm;
+
+- if (!mm->context.size)
+- return 0;
++ mutex_lock(&mm->context.lock);
++
++ if (!mm->context.ldt) {
++ retval = 0;
++ goto out_unlock;
++ }
++
+ if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
+ bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;
+
+- mutex_lock(&mm->context.lock);
+- size = mm->context.size * LDT_ENTRY_SIZE;
++ size = mm->context.ldt->size * LDT_ENTRY_SIZE;
+ if (size > bytecount)
+ size = bytecount;
+
+- err = 0;
+- if (copy_to_user(ptr, mm->context.ldt, size))
+- err = -EFAULT;
+- mutex_unlock(&mm->context.lock);
+- if (err < 0)
+- goto error_return;
++ if (copy_to_user(ptr, mm->context.ldt->entries, size)) {
++ retval = -EFAULT;
++ goto out_unlock;
++ }
++
+ if (size != bytecount) {
+- /* zero-fill the rest */
+- if (clear_user(ptr + size, bytecount - size) != 0) {
+- err = -EFAULT;
+- goto error_return;
++ /* Zero-fill the rest and pretend we read bytecount bytes. */
++ if (clear_user(ptr + size, bytecount - size)) {
++ retval = -EFAULT;
++ goto out_unlock;
+ }
+ }
+- return bytecount;
+-error_return:
+- return err;
++ retval = bytecount;
++
++out_unlock:
++ mutex_unlock(&mm->context.lock);
++ return retval;
+ }
+
+ static int read_default_ldt(void __user *ptr, unsigned long bytecount)
+@@ -195,6 +210,8 @@ static int write_ldt(void __user *ptr, u
+ struct desc_struct ldt;
+ int error;
+ struct user_desc ldt_info;
++ int oldsize, newsize;
++ struct ldt_struct *new_ldt, *old_ldt;
+
+ error = -EINVAL;
+ if (bytecount != sizeof(ldt_info))
+@@ -213,34 +230,39 @@ static int write_ldt(void __user *ptr, u
+ goto out;
+ }
+
+- mutex_lock(&mm->context.lock);
+- if (ldt_info.entry_number >= mm->context.size) {
+- error = alloc_ldt(&current->mm->context,
+- ldt_info.entry_number + 1, 1);
+- if (error < 0)
+- goto out_unlock;
+- }
+-
+- /* Allow LDTs to be cleared by the user. */
+- if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
+- if (oldmode || LDT_empty(&ldt_info)) {
+- memset(&ldt, 0, sizeof(ldt));
+- goto install;
++ if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
++ LDT_empty(&ldt_info)) {
++ /* The user wants to clear the entry. */
++ memset(&ldt, 0, sizeof(ldt));
++ } else {
++ if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
++ error = -EINVAL;
++ goto out;
+ }
++
++ fill_ldt(&ldt, &ldt_info);
++ if (oldmode)
++ ldt.avl = 0;
+ }
+
+- if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
+- error = -EINVAL;
++ mutex_lock(&mm->context.lock);
++
++ old_ldt = mm->context.ldt;
++ oldsize = old_ldt ? old_ldt->size : 0;
++ newsize = max((int)(ldt_info.entry_number + 1), oldsize);
++
++ error = -ENOMEM;
++ new_ldt = alloc_ldt_struct(newsize);
++ if (!new_ldt)
+ goto out_unlock;
+- }
+
+- fill_ldt(&ldt, &ldt_info);
+- if (oldmode)
+- ldt.avl = 0;
+-
+- /* Install the new entry ... */
+-install:
+- write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt);
++ if (old_ldt)
++ memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE);
++ new_ldt->entries[ldt_info.entry_number] = ldt;
++ finalize_ldt_struct(new_ldt);
++
++ install_ldt(mm, new_ldt);
++ free_ldt_struct(old_ldt);
+ error = 0;
+
+ out_unlock:
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -122,11 +122,11 @@ void __show_regs(struct pt_regs *regs, i
+ void release_thread(struct task_struct *dead_task)
+ {
+ if (dead_task->mm) {
+- if (dead_task->mm->context.size) {
++ if (dead_task->mm->context.ldt) {
+ pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
+ dead_task->comm,
+ dead_task->mm->context.ldt,
+- dead_task->mm->context.size);
++ dead_task->mm->context.ldt->size);
+ BUG();
+ }
+ }
+--- a/arch/x86/kernel/step.c
++++ b/arch/x86/kernel/step.c
+@@ -5,6 +5,7 @@
+ #include <linux/mm.h>
+ #include <linux/ptrace.h>
+ #include <asm/desc.h>
++#include <asm/mmu_context.h>
+
+ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
+ {
+@@ -30,10 +31,11 @@ unsigned long convert_ip_to_linear(struc
+ seg &= ~7UL;
+
+ mutex_lock(&child->mm->context.lock);
+- if (unlikely((seg >> 3) >= child->mm->context.size))
++ if (unlikely(!child->mm->context.ldt ||
++ (seg >> 3) >= child->mm->context.ldt->size))
+ addr = -1L; /* bogus selector, access would fault */
+ else {
+- desc = child->mm->context.ldt + seg;
++ desc = &child->mm->context.ldt->entries[seg];
+ base = get_desc_base(desc);
+
+ /* 16-bit code segment? */
+--- a/arch/x86/power/cpu.c
++++ b/arch/x86/power/cpu.c
+@@ -23,6 +23,7 @@
+ #include <asm/debugreg.h>
+ #include <asm/fpu-internal.h> /* pcntxt_mask */
+ #include <asm/cpu.h>
++#include <asm/mmu_context.h>
+
+ #ifdef CONFIG_X86_32
+ __visible unsigned long saved_context_ebx;
+@@ -154,7 +155,7 @@ static void fix_processor_context(void)
+ syscall_init(); /* This sets MSR_*STAR and related */
+ #endif
+ load_TR_desc(); /* This does ltr */
+- load_LDT(&current->active_mm->context); /* This does lldt */
++ load_mm_ldt(current->active_mm); /* This does lldt */
+ }
+
+ /**
--- /dev/null
+From aa1acff356bbedfd03b544051f5b371746735d89 Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@kernel.org>
+Date: Thu, 30 Jul 2015 14:31:31 -0700
+Subject: x86/xen: Probe target addresses in set_aliased_prot() before the hypercall
+
+From: Andy Lutomirski <luto@kernel.org>
+
+commit aa1acff356bbedfd03b544051f5b371746735d89 upstream.
+
+The update_va_mapping hypercall can fail if the VA isn't present
+in the guest's page tables. Under certain loads, this can
+result in an OOPS when the target address is in unpopulated vmap
+space.
+
+While we're at it, add comments to help explain what's going on.
+
+This isn't a great long-term fix. This code should probably be
+changed to use something like set_memory_ro.
+
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Cc: Andrew Cooper <andrew.cooper3@citrix.com>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: David Vrabel <dvrabel@cantab.net>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Jan Beulich <jbeulich@suse.com>
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sasha Levin <sasha.levin@oracle.com>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: security@kernel.org <security@kernel.org>
+Cc: xen-devel <xen-devel@lists.xen.org>
+Link: http://lkml.kernel.org/r/0b0e55b995cda11e7829f140b833ef932fcabe3a.1438291540.git.luto@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
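+
+The shape of the fix, condensed from the hunk below (sketch): fault the
+alias in under the same mm that will issue the hypercall, with
+preemption disabled so the pgd cannot change in between:
+
+	unsigned char dummy;
+
+	preempt_disable();
+	pagefault_disable();	/* only the side effect is wanted */
+	__get_user(dummy, (unsigned char __user __force *)v);
+	pagefault_enable();
+
+	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
+		BUG();
+	preempt_enable();
+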
+ arch/x86/xen/enlighten.c | 40 ++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 40 insertions(+)
+
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -483,6 +483,7 @@ static void set_aliased_prot(void *v, pg
+ pte_t pte;
+ unsigned long pfn;
+ struct page *page;
++ unsigned char dummy;
+
+ ptep = lookup_address((unsigned long)v, &level);
+ BUG_ON(ptep == NULL);
+@@ -492,6 +493,32 @@ static void set_aliased_prot(void *v, pg
+
+ pte = pfn_pte(pfn, prot);
+
++ /*
++ * Careful: update_va_mapping() will fail if the virtual address
++ * we're poking isn't populated in the page tables. We don't
++ * need to worry about the direct map (that's always in the page
++ * tables), but we need to be careful about vmap space. In
++ * particular, the top level page table can lazily propagate
++ * entries between processes, so if we've switched mms since we
++ * vmapped the target in the first place, we might not have the
++ * top-level page table entry populated.
++ *
++ * We disable preemption because we want the same mm active when
++ * we probe the target and when we issue the hypercall. We'll
++ * have the same nominal mm, but if we're a kernel thread, lazy
++ * mm dropping could change our pgd.
++ *
++ * Out of an abundance of caution, this uses __get_user() to fault
++ * in the target address just in case there's some obscure case
++ * in which the target address isn't readable.
++ */
++
++ preempt_disable();
++
++ pagefault_disable(); /* Avoid warnings due to being atomic. */
++ __get_user(dummy, (unsigned char __user __force *)v);
++ pagefault_enable();
++
+ if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
+ BUG();
+
+@@ -503,6 +530,8 @@ static void set_aliased_prot(void *v, pg
+ BUG();
+ } else
+ kmap_flush_unused();
++
++ preempt_enable();
+ }
+
+ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
+@@ -510,6 +539,17 @@ static void xen_alloc_ldt(struct desc_st
+ const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
+ int i;
+
++ /*
++ * We need to mark the all aliases of the LDT pages RO. We
++ * don't need to call vm_flush_aliases(), though, since that's
++ * only responsible for flushing aliases out the TLBs, not the
++ * page tables, and Xen will flush the TLB for us if needed.
++ *
++ * To avoid confusing future readers: none of this is necessary
++ * to load the LDT. The hypervisor only checks this when the
++ * LDT is faulted in due to subsequent descriptor access.
++ */
++
+ for(i = 0; i < entries; i += entries_per_page)
+ set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
+ }
--- /dev/null
+From fcdf31a7c162de0c93a2bee51df4688ab0a348f8 Mon Sep 17 00:00:00 2001
+From: Ross Lagerwall <ross.lagerwall@citrix.com>
+Date: Fri, 31 Jul 2015 14:30:42 +0100
+Subject: xen/events/fifo: Handle linked events when closing a port
+
+From: Ross Lagerwall <ross.lagerwall@citrix.com>
+
+commit fcdf31a7c162de0c93a2bee51df4688ab0a348f8 upstream.
+
+An event channel bound to a CPU that was offlined may still be linked
+on that CPU's queue. If this event channel is closed and reused,
+subsequent events will be lost because the event channel is never
+unlinked and thus cannot be linked onto the correct queue.
+
+When a channel is closed and the event is still linked into a queue,
+ensure that it is unlinked before completing.
+
+If the CPU to which the event channel is bound is online, spin until the
+event is handled by that CPU. If that CPU is offline, it can't handle
+the event, so clear the event queue during the close, dropping the
+events.
+
+This fixes the missing interrupts (and subsequent disk stalls etc.)
+when offlining a CPU.
+
+Signed-off-by: Ross Lagerwall <ross.lagerwall@citrix.com>
+Signed-off-by: David Vrabel <david.vrabel@citrix.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
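+
+The close-time decision this patch adds, condensed from the hunks
+below (sketch):
+
+	if (cpu_online(cpu)) {
+		/* the owning CPU will still consume the event; wait */
+		while (evtchn_fifo_is_linked(port))
+			cpu_relax();
+	} else {
+		/* an offline CPU never will; drain its queue, drop events */
+		__evtchn_fifo_handle_events(cpu, true);
+	}
+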
+ drivers/xen/events/events_base.c | 10 ++++---
+ drivers/xen/events/events_fifo.c | 45 +++++++++++++++++++++++++++++++----
+ drivers/xen/events/events_internal.h | 7 +++++
+ 3 files changed, 53 insertions(+), 9 deletions(-)
+
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -452,10 +452,12 @@ static void xen_free_irq(unsigned irq)
+ irq_free_desc(irq);
+ }
+
+-static void xen_evtchn_close(unsigned int port)
++static void xen_evtchn_close(unsigned int port, unsigned int cpu)
+ {
+ struct evtchn_close close;
+
++ xen_evtchn_op_close(port, cpu);
++
+ close.port = port;
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
+ BUG();
+@@ -544,7 +546,7 @@ out:
+
+ err:
+ pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc);
+- xen_evtchn_close(evtchn);
++ xen_evtchn_close(evtchn, NR_CPUS);
+ return 0;
+ }
+
+@@ -565,7 +567,7 @@ static void shutdown_pirq(struct irq_dat
+ return;
+
+ mask_evtchn(evtchn);
+- xen_evtchn_close(evtchn);
++ xen_evtchn_close(evtchn, cpu_from_evtchn(evtchn));
+ xen_irq_info_cleanup(info);
+ }
+
+@@ -609,7 +611,7 @@ static void __unbind_from_irq(unsigned i
+ if (VALID_EVTCHN(evtchn)) {
+ unsigned int cpu = cpu_from_irq(irq);
+
+- xen_evtchn_close(evtchn);
++ xen_evtchn_close(evtchn, cpu);
+
+ switch (type_from_irq(irq)) {
+ case IRQT_VIRQ:
+--- a/drivers/xen/events/events_fifo.c
++++ b/drivers/xen/events/events_fifo.c
+@@ -255,6 +255,12 @@ static void evtchn_fifo_unmask(unsigned
+ }
+ }
+
++static bool evtchn_fifo_is_linked(unsigned port)
++{
++ event_word_t *word = event_word_from_port(port);
++ return sync_test_bit(EVTCHN_FIFO_BIT(LINKED, word), BM(word));
++}
++
+ static uint32_t clear_linked(volatile event_word_t *word)
+ {
+ event_word_t new, old, w;
+@@ -281,7 +287,8 @@ static void handle_irq_for_port(unsigned
+
+ static void consume_one_event(unsigned cpu,
+ struct evtchn_fifo_control_block *control_block,
+- unsigned priority, unsigned long *ready)
++ unsigned priority, unsigned long *ready,
++ bool drop)
+ {
+ struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
+ uint32_t head;
+@@ -313,13 +320,15 @@ static void consume_one_event(unsigned c
+ if (head == 0)
+ clear_bit(priority, ready);
+
+- if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port))
+- handle_irq_for_port(port);
++ if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) {
++ if (likely(!drop))
++ handle_irq_for_port(port);
++ }
+
+ q->head[priority] = head;
+ }
+
+-static void evtchn_fifo_handle_events(unsigned cpu)
++static void __evtchn_fifo_handle_events(unsigned cpu, bool drop)
+ {
+ struct evtchn_fifo_control_block *control_block;
+ unsigned long ready;
+@@ -331,11 +340,16 @@ static void evtchn_fifo_handle_events(un
+
+ while (ready) {
+ q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES);
+- consume_one_event(cpu, control_block, q, &ready);
++ consume_one_event(cpu, control_block, q, &ready, drop);
+ ready |= xchg(&control_block->ready, 0);
+ }
+ }
+
++static void evtchn_fifo_handle_events(unsigned cpu)
++{
++ __evtchn_fifo_handle_events(cpu, false);
++}
++
+ static void evtchn_fifo_resume(void)
+ {
+ unsigned cpu;
+@@ -371,6 +385,26 @@ static void evtchn_fifo_resume(void)
+ event_array_pages = 0;
+ }
+
++static void evtchn_fifo_close(unsigned port, unsigned int cpu)
++{
++ if (cpu == NR_CPUS)
++ return;
++
++ get_online_cpus();
++ if (cpu_online(cpu)) {
++ if (WARN_ON(irqs_disabled()))
++ goto out;
++
++ while (evtchn_fifo_is_linked(port))
++ cpu_relax();
++ } else {
++ __evtchn_fifo_handle_events(cpu, true);
++ }
++
++out:
++ put_online_cpus();
++}
++
+ static const struct evtchn_ops evtchn_ops_fifo = {
+ .max_channels = evtchn_fifo_max_channels,
+ .nr_channels = evtchn_fifo_nr_channels,
+@@ -384,6 +418,7 @@ static const struct evtchn_ops evtchn_op
+ .unmask = evtchn_fifo_unmask,
+ .handle_events = evtchn_fifo_handle_events,
+ .resume = evtchn_fifo_resume,
++ .close = evtchn_fifo_close,
+ };
+
+ static int evtchn_fifo_alloc_control_block(unsigned cpu)
+--- a/drivers/xen/events/events_internal.h
++++ b/drivers/xen/events/events_internal.h
+@@ -68,6 +68,7 @@ struct evtchn_ops {
+ bool (*test_and_set_mask)(unsigned port);
+ void (*mask)(unsigned port);
+ void (*unmask)(unsigned port);
++ void (*close)(unsigned port, unsigned cpu);
+
+ void (*handle_events)(unsigned cpu);
+ void (*resume)(void);
+@@ -145,6 +146,12 @@ static inline void xen_evtchn_resume(voi
+ evtchn_ops->resume();
+ }
+
++static inline void xen_evtchn_op_close(unsigned port, unsigned cpu)
++{
++ if (evtchn_ops->close)
++ return evtchn_ops->close(port, cpu);
++}
++
+ void xen_evtchn_2l_init(void);
+ int xen_evtchn_fifo_init(void);
+
--- /dev/null
+From 30b03d05e07467b8c6ec683ea96b5bffcbcd3931 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?=
+ <marmarek@invisiblethingslab.com>
+Date: Fri, 26 Jun 2015 03:28:24 +0200
+Subject: xen/gntdevt: Fix race condition in gntdev_release()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
+
+commit 30b03d05e07467b8c6ec683ea96b5bffcbcd3931 upstream.
+
+While gntdev_release() is called, the MMU notifier is still registered
+and can traverse priv->maps list even if no pages are mapped (which is
+the case -- gntdev_release() is called after all). But
+gntdev_release() will clear that list, so make sure that only one of
+those things happens at the same time.
+
+Signed-off-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
+Signed-off-by: David Vrabel <david.vrabel@citrix.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/xen/gntdev.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/xen/gntdev.c
++++ b/drivers/xen/gntdev.c
+@@ -568,12 +568,14 @@ static int gntdev_release(struct inode *
+
+ pr_debug("priv %p\n", priv);
+
++ mutex_lock(&priv->lock);
+ while (!list_empty(&priv->maps)) {
+ map = list_entry(priv->maps.next, struct grant_map, next);
+ list_del(&map->next);
+ gntdev_put_map(NULL /* already removed */, map);
+ }
+ WARN_ON(!list_empty(&priv->freeable_maps));
++ mutex_unlock(&priv->lock);
+
+ if (use_ptemod)
+ mmu_notifier_unregister(&priv->mn, priv->mm);