--- /dev/null
+From 7e700d2c59e5853c9126642976b4f5768f64c9b3 Mon Sep 17 00:00:00 2001
+From: Prarit Bhargava <prarit@redhat.com>
+Date: Wed, 31 May 2017 13:32:00 -0400
+Subject: acpi/nfit: Fix memory corruption/Unregister mce decoder on failure
+
+From: Prarit Bhargava <prarit@redhat.com>
+
+commit 7e700d2c59e5853c9126642976b4f5768f64c9b3 upstream.
+
+nfit_init() calls nfit_mce_register() on module load. When the module
+load fails, the nfit mce decoder is not unregistered. The module's
+memory is freed, leaving the decoder chain referencing junk. This will
+cause panics, as future registrations will reference the freed memory.
+
+Unregister the nfit mce decoder on module init failure.
+
+[v2]: register and then unregister mce handler to avoid losing mce events
+[v3]: also cleanup nfit workqueue
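+
+For reference, a condensed sketch of the resulting init/error path (an
+illustration of the ordering only, not the literal upstream hunks):
+
+    static __init int nfit_init(void)
+    {
+        int ret;
+
+        /* ... BUILD_BUG_ON checks and nfit_wq allocation as before ... */
+
+        /* register first so no MCEs are missed during driver registration */
+        nfit_mce_register();
+        ret = acpi_bus_register_driver(&acpi_nfit_driver);
+        if (ret) {
+            /* unwind both so nothing references this module once it is freed */
+            nfit_mce_unregister();
+            destroy_workqueue(nfit_wq);
+        }
+        return ret;
+    }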
+
+Fixes: 6839a6d96f4e ("nfit: do an ARS scrub on hitting a latent media error")
+Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
+Cc: Len Brown <lenb@kernel.org>
+Cc: Vishal Verma <vishal.l.verma@intel.com>
+Cc: "Lee, Chun-Yi" <joeyli.kernel@gmail.com>
+Cc: Linda Knippers <linda.knippers@hpe.com>
+Cc: lszubowi@redhat.com
+Acked-by: Jeff Moyer <jmoyer@redhat.com>
+Signed-off-by: Prarit Bhargava <prarit@redhat.com>
+Reviewed-by: Vishal Verma <vishal.l.verma@intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/acpi/nfit/core.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/drivers/acpi/nfit/core.c
++++ b/drivers/acpi/nfit/core.c
+@@ -3043,6 +3043,8 @@ static struct acpi_driver acpi_nfit_driv
+
+ static __init int nfit_init(void)
+ {
++ int ret;
++
+ BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
+ BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
+ BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
+@@ -3070,8 +3072,14 @@ static __init int nfit_init(void)
+ return -ENOMEM;
+
+ nfit_mce_register();
++ ret = acpi_bus_register_driver(&acpi_nfit_driver);
++ if (ret) {
++ nfit_mce_unregister();
++ destroy_workqueue(nfit_wq);
++ }
++
++ return ret;
+
+- return acpi_bus_register_driver(&acpi_nfit_driver);
+ }
+
+ static __exit void nfit_exit(void)
--- /dev/null
+From 6e34e1f23d780978da65968327cbba6d7013a73f Mon Sep 17 00:00:00 2001
+From: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Date: Thu, 13 Jul 2017 15:03:51 -0700
+Subject: cpufreq: intel_pstate: Correct the busy calculation for KNL
+
+From: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+
+commit 6e34e1f23d780978da65968327cbba6d7013a73f upstream.
+
+The busy percent calculated for the Knights Landing (KNL) platform
+is 1024 times smaller than the correct busy value. This causes
+performance to get stuck at the lowest ratio.
+
+The scaling algorithm used for KNL is performance-based, but it still
+looks at the CPU load to set the scaled busy factor to 0 when the
+load is less than 1 percent. In this case, since the computed load
+is 1024x smaller than it should be, the scaled busy factor will
+always be 0, irrespective of CPU busyness.
+
+This needs a fix similar to the turbostat one in commit b2b34dfe4d9a
+(tools/power turbostat: KNL workaround for %Busy and Avg_MHz).
+
+For this reason, add one more callback to the processor-specific
+callbacks to specify an MPERF multiplier, represented by the number of
+bit positions to shift the value of that register to the left to
+compensate for its rate difference with respect to the TSC. This
+shift value is used during CPU busy calculations.
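+
+As a rough, self-contained illustration of the effect (plain userspace
+arithmetic with made-up sample deltas, not kernel code): on KNL, MPERF
+advances only once per 1024 TSC cycles, so the raw mperf/tsc ratio
+under-reports load by 1024x until mperf is shifted left by the new
+per-CPU shift value.
+
+    #include <stdio.h>
+    #include <stdint.h>
+
+    int main(void)
+    {
+        uint64_t tsc_delta   = 2000000000ULL;    /* TSC ticks in a fully busy sample */
+        uint64_t mperf_delta = tsc_delta / 1024; /* KNL: MPERF ticks once per 1024 TSC cycles */
+        int shift = 10;                          /* value returned by knl_get_aperf_mperf_shift() */
+
+        printf("raw busy:     %.4f%%\n", 100.0 * mperf_delta / tsc_delta);
+        printf("shifted busy: %.4f%%\n", 100.0 * (mperf_delta << shift) / tsc_delta);
+        return 0;
+    }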
+
+Fixes: ffb810563c (intel_pstate: Avoid getting stuck in high P-states when idle)
+Reported-and-tested-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
+Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+[ rjw: Changelog ]
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/cpufreq/intel_pstate.c | 21 +++++++++++++++++++--
+ 1 file changed, 19 insertions(+), 2 deletions(-)
+
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -225,6 +225,9 @@ struct global_params {
+ * @vid: Stores VID limits for this CPU
+ * @pid: Stores PID parameters for this CPU
+ * @last_sample_time: Last Sample time
++ * @aperf_mperf_shift: Number of clock cycles after aperf, mperf is incremented
++ * This shift is a multiplier to mperf delta to
++ * calculate CPU busy.
+ * @prev_aperf: Last APERF value read from APERF MSR
+ * @prev_mperf: Last MPERF value read from MPERF MSR
+ * @prev_tsc: Last timestamp counter (TSC) value
+@@ -261,6 +264,7 @@ struct cpudata {
+
+ u64 last_update;
+ u64 last_sample_time;
++ u64 aperf_mperf_shift;
+ u64 prev_aperf;
+ u64 prev_mperf;
+ u64 prev_tsc;
+@@ -323,6 +327,7 @@ struct pstate_funcs {
+ int (*get_min)(void);
+ int (*get_turbo)(void);
+ int (*get_scaling)(void);
++ int (*get_aperf_mperf_shift)(void);
+ u64 (*get_val)(struct cpudata*, int pstate);
+ void (*get_vid)(struct cpudata *);
+ void (*update_util)(struct update_util_data *data, u64 time,
+@@ -1485,6 +1490,11 @@ static u64 core_get_val(struct cpudata *
+ return val;
+ }
+
++static int knl_get_aperf_mperf_shift(void)
++{
++ return 10;
++}
++
+ static int knl_get_turbo_pstate(void)
+ {
+ u64 value;
+@@ -1543,6 +1553,9 @@ static void intel_pstate_get_cpu_pstates
+ cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
+ cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+
++ if (pstate_funcs.get_aperf_mperf_shift)
++ cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
++
+ if (pstate_funcs.get_vid)
+ pstate_funcs.get_vid(cpu);
+
+@@ -1619,7 +1632,8 @@ static inline int32_t get_target_pstate_
+ if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE)
+ return cpu->pstate.turbo_pstate;
+
+- busy_frac = div_fp(sample->mperf, sample->tsc);
++ busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift,
++ sample->tsc);
+
+ boost = cpu->iowait_boost;
+ cpu->iowait_boost >>= 1;
+@@ -1681,7 +1695,8 @@ static inline int32_t get_target_pstate_
+ sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns);
+ perf_scaled = mul_fp(perf_scaled, sample_ratio);
+ } else {
+- sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
++ sample_ratio = div_fp(100 * (cpu->sample.mperf << cpu->aperf_mperf_shift),
++ cpu->sample.tsc);
+ if (sample_ratio < int_tofp(1))
+ perf_scaled = 0;
+ }
+@@ -1824,6 +1839,7 @@ static const struct pstate_funcs knl_fun
+ .get_max_physical = core_get_max_pstate_physical,
+ .get_min = core_get_min_pstate,
+ .get_turbo = knl_get_turbo_pstate,
++ .get_aperf_mperf_shift = knl_get_aperf_mperf_shift,
+ .get_scaling = core_get_scaling,
+ .get_val = core_get_val,
+ .update_util = intel_pstate_update_util_pid,
+@@ -2408,6 +2424,7 @@ static void __init copy_cpu_funcs(struct
+ pstate_funcs.get_val = funcs->get_val;
+ pstate_funcs.get_vid = funcs->get_vid;
+ pstate_funcs.update_util = funcs->update_util;
++ pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift;
+
+ intel_pstate_use_acpi_profile();
+ }
--- /dev/null
+From bbb3be170ac2891526ad07b18af7db226879a8e7 Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Tue, 18 Jul 2017 17:49:14 -0700
+Subject: device-dax: fix sysfs duplicate warnings
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit bbb3be170ac2891526ad07b18af7db226879a8e7 upstream.
+
+Fix warnings of the form...
+
+ WARNING: CPU: 10 PID: 4983 at fs/sysfs/dir.c:31 sysfs_warn_dup+0x62/0x80
+ sysfs: cannot create duplicate filename '/class/dax/dax12.0'
+ Call Trace:
+ dump_stack+0x63/0x86
+ __warn+0xcb/0xf0
+ warn_slowpath_fmt+0x5a/0x80
+ ? kernfs_path_from_node+0x4f/0x60
+ sysfs_warn_dup+0x62/0x80
+ sysfs_do_create_link_sd.isra.2+0x97/0xb0
+ sysfs_create_link+0x25/0x40
+ device_add+0x266/0x630
+ devm_create_dax_dev+0x2cf/0x340 [dax]
+ dax_pmem_probe+0x1f5/0x26e [dax_pmem]
+ nvdimm_bus_probe+0x71/0x120
+
+...by reusing the namespace id for the device-dax instance name.
+
+Now that we have decided that there will never be more than one
+device-dax instance per libnvdimm-namespace parent device [1], we can
+directly reuse the namespace ids. There are some possible follow-on
+cleanups, but those are saved for a later patch to simplify the -stable
+backport.
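+
+A condensed sketch of the naming scheme after this change (it folds the
+pmem.c and device.c hunks together purely for illustration): the ids are
+parsed from the parent namespace device name, so the device-dax instance
+name is unique per namespace by construction.
+
+    int region_id, id;
+
+    /* the parent namespace "namespace12.0" yields region_id=12, id=0 */
+    if (sscanf(dev_name(&ndns->dev), "namespace%d.%d", &region_id, &id) != 2)
+        return -EINVAL;
+
+    /* so the device-dax instance ends up named "dax12.0", matching its
+     * namespace instead of an independently ida-allocated number */
+    dev_set_name(dev, "dax%d.%d", region_id, id);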
+
+[1]: https://lists.01.org/pipermail/linux-nvdimm/2016-December/008266.html
+
+Fixes: 98a29c39dc68 ("libnvdimm, namespace: allow creation of multiple pmem...")
+Cc: Jeff Moyer <jmoyer@redhat.com>
+Reported-by: Dariusz Dokupil <dariusz.dokupil@intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dax/device-dax.h | 2 +-
+ drivers/dax/device.c | 24 ++++++++++++++++--------
+ drivers/dax/pmem.c | 12 +++++++-----
+ 3 files changed, 24 insertions(+), 14 deletions(-)
+
+--- a/drivers/dax/device-dax.h
++++ b/drivers/dax/device-dax.h
+@@ -21,5 +21,5 @@ struct dax_region *alloc_dax_region(stru
+ int region_id, struct resource *res, unsigned int align,
+ void *addr, unsigned long flags);
+ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
+- struct resource *res, int count);
++ int id, struct resource *res, int count);
+ #endif /* __DEVICE_DAX_H__ */
+--- a/drivers/dax/device.c
++++ b/drivers/dax/device.c
+@@ -528,7 +528,8 @@ static void dev_dax_release(struct devic
+ struct dax_region *dax_region = dev_dax->region;
+ struct dax_device *dax_dev = dev_dax->dax_dev;
+
+- ida_simple_remove(&dax_region->ida, dev_dax->id);
++ if (dev_dax->id >= 0)
++ ida_simple_remove(&dax_region->ida, dev_dax->id);
+ dax_region_put(dax_region);
+ put_dax(dax_dev);
+ kfree(dev_dax);
+@@ -558,7 +559,7 @@ static void unregister_dev_dax(void *dev
+ }
+
+ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
+- struct resource *res, int count)
++ int id, struct resource *res, int count)
+ {
+ struct device *parent = dax_region->dev;
+ struct dax_device *dax_dev;
+@@ -586,10 +587,16 @@ struct dev_dax *devm_create_dev_dax(stru
+ if (i < count)
+ goto err_id;
+
+- dev_dax->id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
+- if (dev_dax->id < 0) {
+- rc = dev_dax->id;
+- goto err_id;
++ if (id < 0) {
++ id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
++ dev_dax->id = id;
++ if (id < 0) {
++ rc = id;
++ goto err_id;
++ }
++ } else {
++ /* region provider owns @id lifetime */
++ dev_dax->id = -1;
+ }
+
+ /*
+@@ -619,7 +626,7 @@ struct dev_dax *devm_create_dev_dax(stru
+ dev->parent = parent;
+ dev->groups = dax_attribute_groups;
+ dev->release = dev_dax_release;
+- dev_set_name(dev, "dax%d.%d", dax_region->id, dev_dax->id);
++ dev_set_name(dev, "dax%d.%d", dax_region->id, id);
+
+ rc = cdev_device_add(cdev, dev);
+ if (rc) {
+@@ -635,7 +642,8 @@ struct dev_dax *devm_create_dev_dax(stru
+ return dev_dax;
+
+ err_dax:
+- ida_simple_remove(&dax_region->ida, dev_dax->id);
++ if (dev_dax->id >= 0)
++ ida_simple_remove(&dax_region->ida, dev_dax->id);
+ err_id:
+ kfree(dev_dax);
+
+--- a/drivers/dax/pmem.c
++++ b/drivers/dax/pmem.c
+@@ -58,13 +58,12 @@ static void dax_pmem_percpu_kill(void *d
+
+ static int dax_pmem_probe(struct device *dev)
+ {
+- int rc;
+ void *addr;
+ struct resource res;
++ int rc, id, region_id;
+ struct nd_pfn_sb *pfn_sb;
+ struct dev_dax *dev_dax;
+ struct dax_pmem *dax_pmem;
+- struct nd_region *nd_region;
+ struct nd_namespace_io *nsio;
+ struct dax_region *dax_region;
+ struct nd_namespace_common *ndns;
+@@ -123,14 +122,17 @@ static int dax_pmem_probe(struct device
+ /* adjust the dax_region resource to the start of data */
+ res.start += le64_to_cpu(pfn_sb->dataoff);
+
+- nd_region = to_nd_region(dev->parent);
+- dax_region = alloc_dax_region(dev, nd_region->id, &res,
++	rc = sscanf(dev_name(&ndns->dev), "namespace%d.%d", &region_id, &id);
++ if (rc != 2)
++ return -EINVAL;
++
++ dax_region = alloc_dax_region(dev, region_id, &res,
+ le32_to_cpu(pfn_sb->align), addr, PFN_DEV|PFN_MAP);
+ if (!dax_region)
+ return -ENOMEM;
+
+ /* TODO: support for subdividing a dax region... */
+- dev_dax = devm_create_dev_dax(dax_region, &res, 1);
++ dev_dax = devm_create_dev_dax(dax_region, id, &res, 1);
+
+ /* child dev_dax instances now own the lifetime of the dax_region */
+ dax_region_put(dax_region);
--- /dev/null
+From 7581d5ca2bb269cfc2ce2d0cb489aac513167f6b Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Thu, 22 Jun 2017 17:02:11 +0100
+Subject: drm/i915/fbdev: Check for existence of ifbdev->vma before operations
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit 7581d5ca2bb269cfc2ce2d0cb489aac513167f6b upstream.
+
+Commit fabef825626d ("drm/i915: Drop struct_mutex around frontbuffer
+flushes") adds a dependency on ifbdev->vma when flushing the framebuffer,
+but the checks are only against the existence of the ifbdev->fb and not
+against ifbdev->vma. This leaves a window of opportunity where we may
+try to operate on the fbdev prior to it being probed (thanks to
+asynchronous booting).
+
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=101534
+Fixes: fabef825626d ("drm/i915: Drop struct_mutex around frontbuffer flushes")
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Cc: Daniel Vetter <daniel.vetter@intel.com>
+Link: http://patchwork.freedesktop.org/patch/msgid/20170622160211.783-1-chris@chris-wilson.co.uk
+Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+(cherry picked from commit 15727ed0d944ce1dec8b9e1082dd3df29a0fdf44)
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/intel_fbdev.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/drivers/gpu/drm/i915/intel_fbdev.c
++++ b/drivers/gpu/drm/i915/intel_fbdev.c
+@@ -535,13 +535,14 @@ static void intel_fbdev_destroy(struct i
+
+ drm_fb_helper_fini(&ifbdev->helper);
+
+- if (ifbdev->fb) {
++ if (ifbdev->vma) {
+ mutex_lock(&ifbdev->helper.dev->struct_mutex);
+ intel_unpin_fb_vma(ifbdev->vma);
+ mutex_unlock(&ifbdev->helper.dev->struct_mutex);
++ }
+
++ if (ifbdev->fb)
+ drm_framebuffer_remove(&ifbdev->fb->base);
+- }
+
+ kfree(ifbdev);
+ }
+@@ -765,7 +766,7 @@ void intel_fbdev_set_suspend(struct drm_
+ struct intel_fbdev *ifbdev = dev_priv->fbdev;
+ struct fb_info *info;
+
+- if (!ifbdev || !ifbdev->fb)
++ if (!ifbdev || !ifbdev->vma)
+ return;
+
+ info = ifbdev->helper.fbdev;
+@@ -812,7 +813,7 @@ void intel_fbdev_output_poll_changed(str
+ {
+ struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
+
+- if (ifbdev && ifbdev->fb)
++ if (ifbdev && ifbdev->vma)
+ drm_fb_helper_hotplug_event(&ifbdev->helper);
+ }
+
+@@ -824,7 +825,7 @@ void intel_fbdev_restore_mode(struct drm
+ return;
+
+ intel_fbdev_sync(ifbdev);
+- if (!ifbdev->fb)
++ if (!ifbdev->vma)
+ return;
+
+ if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper) == 0)
--- /dev/null
+From 04941829b0049d2446c7042ab9686dd057d809a6 Mon Sep 17 00:00:00 2001
+From: "sagar.a.kamble@intel.com" <sagar.a.kamble@intel.com>
+Date: Tue, 27 Jun 2017 23:09:41 +0530
+Subject: drm/i915: Hold RPM wakelock while initializing OA buffer
+
+From: sagar.a.kamble@intel.com <sagar.a.kamble@intel.com>
+
+commit 04941829b0049d2446c7042ab9686dd057d809a6 upstream.
+
+OA buffer initialization involves access to HW registers to set
+the OA base, head and tail. Ensure the device is awake while setting
+these. With this, all oa.ops are covered by the RPM and forcewake
+wakelocks.
+
+Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
+Signed-off-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
+Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
+Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Link: http://patchwork.freedesktop.org/patch/msgid/1498585181-23048-1-git-send-email-sagar.a.kamble@intel.com
+Fixes: d79651522e89c ("drm/i915: Enable i915 perf stream for Haswell OA unit")
+(cherry picked from commit 987f8c444aa2c33d98e7030d0c5f0a5325cc84ea)
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/i915_perf.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_perf.c
++++ b/drivers/gpu/drm/i915/i915_perf.c
+@@ -1210,10 +1210,6 @@ static int i915_oa_stream_init(struct i9
+ return ret;
+ }
+
+- ret = alloc_oa_buffer(dev_priv);
+- if (ret)
+- goto err_oa_buf_alloc;
+-
+ /* PRM - observability performance counters:
+ *
+ * OACONTROL, performance counter enable, note:
+@@ -1229,6 +1225,10 @@ static int i915_oa_stream_init(struct i9
+ intel_runtime_pm_get(dev_priv);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
++ ret = alloc_oa_buffer(dev_priv);
++ if (ret)
++ goto err_oa_buf_alloc;
++
+ ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv);
+ if (ret)
+ goto err_enable;
+@@ -1240,11 +1240,11 @@ static int i915_oa_stream_init(struct i9
+ return 0;
+
+ err_enable:
+- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+- intel_runtime_pm_put(dev_priv);
+ free_oa_buffer(dev_priv);
+
+ err_oa_buf_alloc:
++ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
++ intel_runtime_pm_put(dev_priv);
+ if (stream->ctx)
+ oa_put_render_ctx_id(stream);
+
--- /dev/null
+From 9c75b185274b7766fe69c2e73607c1ed780b284b Mon Sep 17 00:00:00 2001
+From: Gabriel Krisman Bertazi <krisman@collabora.co.uk>
+Date: Wed, 28 Jun 2017 18:06:05 -0300
+Subject: drm/i915: reintroduce VLV/CHV PFI programming power domain workaround
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Gabriel Krisman Bertazi <krisman@collabora.co.uk>
+
+commit 9c75b185274b7766fe69c2e73607c1ed780b284b upstream.
+
+There are still cases on these platforms where an attempt is made to
+configure the CDCLK while the power domain is off, like when coming back
+from a suspend. So the workaround below is still needed.
+
+This effectively reverts commit 63ff30442519 ("drm/i915: Nuke the
+VLV/CHV PFI programming power domain workaround").
+
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=101517
+Suggested-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Signed-off-by: Gabriel Krisman Bertazi <krisman@collabora.co.uk>
+Link: http://patchwork.freedesktop.org/patch/msgid/20170628210605.4994-1-krisman@collabora.co.uk
+Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+(cherry picked from commit 886015a0ad43c7fc034b23ea4614ba39162f9ddd)
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/intel_cdclk.c | 20 ++++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+--- a/drivers/gpu/drm/i915/intel_cdclk.c
++++ b/drivers/gpu/drm/i915/intel_cdclk.c
+@@ -491,6 +491,14 @@ static void vlv_set_cdclk(struct drm_i91
+ int cdclk = cdclk_state->cdclk;
+ u32 val, cmd;
+
++ /* There are cases where we can end up here with power domains
++ * off and a CDCLK frequency other than the minimum, like when
++ * issuing a modeset without actually changing any display after
++ * a system suspend. So grab the PIPE-A domain, which covers
++ * the HW blocks needed for the following programming.
++ */
++ intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
++
+ if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
+ cmd = 2;
+ else if (cdclk == 266667)
+@@ -549,6 +557,8 @@ static void vlv_set_cdclk(struct drm_i91
+ intel_update_cdclk(dev_priv);
+
+ vlv_program_pfi_credits(dev_priv);
++
++ intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
+ }
+
+ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
+@@ -568,6 +578,14 @@ static void chv_set_cdclk(struct drm_i91
+ return;
+ }
+
++ /* There are cases where we can end up here with power domains
++ * off and a CDCLK frequency other than the minimum, like when
++ * issuing a modeset without actually changing any display after
++ * a system suspend. So grab the PIPE-A domain, which covers
++ * the HW blocks needed for the following programming.
++ */
++ intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
++
+ /*
+ * Specs are full of misinformation, but testing on actual
+ * hardware has shown that we just need to write the desired
+@@ -590,6 +608,8 @@ static void chv_set_cdclk(struct drm_i91
+ intel_update_cdclk(dev_priv);
+
+ vlv_program_pfi_credits(dev_priv);
++
++ intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
+ }
+
+ static int bdw_calc_cdclk(int max_pixclk)
--- /dev/null
+From 799ee2970485dc206c3bf347d6e6827c04d5e4f9 Mon Sep 17 00:00:00 2001
+From: Philipp Zabel <p.zabel@pengutronix.de>
+Date: Mon, 12 Jun 2017 17:54:29 +0200
+Subject: drm/imx: parallel-display: Accept drm_of_find_panel_or_bridge failure
+
+From: Philipp Zabel <p.zabel@pengutronix.de>
+
+commit 799ee2970485dc206c3bf347d6e6827c04d5e4f9 upstream.
+
+For backwards compatibility, the parallel panel driver should continue
+to work without an endpoint linking to a panel in DT. With the recent
+switch to drm_of_find_panel_or_bridge, an absent panel results in a
+failure with an -ENODEV return code. To restore
+the old behaviour, ignore the -ENODEV return code.
+
+Reported-by: Nikita Yushchenko <nikita.yoush@cogentembedded.com>
+Fixes: ebc944613567 ("drm: convert drivers to use drm_of_find_panel_or_bridge")
+Tested-by: Chris Healy <cphealy@gmail.com>
+Signed-off-by: Philipp Zabel <p.zabel@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/imx/parallel-display.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/imx/parallel-display.c
++++ b/drivers/gpu/drm/imx/parallel-display.c
+@@ -237,7 +237,7 @@ static int imx_pd_bind(struct device *de
+
+ /* port@1 is the output port */
+ ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel, &imxpd->bridge);
+- if (ret)
++ if (ret && ret != -ENODEV)
+ return ret;
+
+ imxpd->dev = dev;
--- /dev/null
+From 975e83cfb8dc16e7a2fdc58188c77c0c605876c2 Mon Sep 17 00:00:00 2001
+From: Sudeep Holla <Sudeep.Holla@arm.com>
+Date: Fri, 14 Jul 2017 11:51:48 +0100
+Subject: PM / Domains: defer dev_pm_domain_set() until genpd->attach_dev succeeds if present
+
+From: Sudeep Holla <Sudeep.Holla@arm.com>
+
+commit 975e83cfb8dc16e7a2fdc58188c77c0c605876c2 upstream.
+
+If the genpd->attach_dev or genpd->power_on fails, genpd_dev_pm_attach
+may return -EPROBE_DEFER initially. However genpd_alloc_dev_data sets
+the PM domain for the device unconditionally.
+
+When subsequent attempts are made to call genpd_dev_pm_attach, it may
+return -EEXIST after checking dev->pm_domain, without re-attempting to
+call attach_dev or power_on.
+
+platform_drv_probe then attempts to call drv->probe, as the return value
+-EEXIST != -EPROBE_DEFER, which may end up in a situation where the
+device is accessed without its power domain switched on.
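+
+A hypothetical driver-side view of that sequence (an illustration of the
+call flow, not code from this patch):
+
+    /* 1st probe attempt: the power domain attach fails late */
+    ret = dev_pm_domain_attach(&pdev->dev, true);	/* -EPROBE_DEFER */
+    /* ...but dev->pm_domain was already set by genpd_alloc_dev_data() */
+
+    /* re-probe: the stale dev->pm_domain short-circuits the attach */
+    ret = dev_pm_domain_attach(&pdev->dev, true);	/* -EEXIST, no retry of
+                                                     * attach_dev/power_on */
+    /* platform_drv_probe() sees ret != -EPROBE_DEFER and goes on to call
+     * drv->probe() with the domain still powered off */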
+
+Fixes: f104e1e5ef57 (PM / Domains: Re-order initialization of generic_pm_domain_data)
+Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
+Acked-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/base/power/domain.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -1168,8 +1168,6 @@ static struct generic_pm_domain_data *ge
+
+ spin_unlock_irq(&dev->power.lock);
+
+- dev_pm_domain_set(dev, &genpd->domain);
+-
+ return gpd_data;
+
+ err_free:
+@@ -1183,8 +1181,6 @@ static struct generic_pm_domain_data *ge
+ static void genpd_free_dev_data(struct device *dev,
+ struct generic_pm_domain_data *gpd_data)
+ {
+- dev_pm_domain_set(dev, NULL);
+-
+ spin_lock_irq(&dev->power.lock);
+
+ dev->power.subsys_data->domain_data = NULL;
+@@ -1221,6 +1217,8 @@ static int genpd_add_device(struct gener
+ if (ret)
+ goto out;
+
++ dev_pm_domain_set(dev, &genpd->domain);
++
+ genpd->device_count++;
+ genpd->max_off_time_changed = true;
+
+@@ -1282,6 +1280,8 @@ static int genpd_remove_device(struct ge
+ if (genpd->detach_dev)
+ genpd->detach_dev(genpd, dev);
+
++ dev_pm_domain_set(dev, NULL);
++
+ list_del_init(&pdd->list_node);
+
+ genpd_unlock(genpd);
--- /dev/null
+From 6883cd7f68245e43e91e5ee583b7550abf14523f Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Thu, 22 Jun 2017 09:32:49 +0200
+Subject: reiserfs: Don't clear SGID when inheriting ACLs
+
+From: Jan Kara <jack@suse.cz>
+
+commit 6883cd7f68245e43e91e5ee583b7550abf14523f upstream.
+
+When a new directory 'DIR1' is created in a directory 'DIR0' with the SGID
+bit set, 'DIR1' is expected to have the SGID bit set (and its owning group
+equal to the owning group of 'DIR0'). However, when 'DIR0' also has some
+default ACLs that 'DIR1' inherits, setting these ACLs will result in the
+SGID bit on 'DIR1' getting cleared if the user is not a member of the
+owning group.
+
+Fix the problem by moving posix_acl_update_mode() out of
+__reiserfs_set_acl() into reiserfs_set_acl(). That way the function will
+not be called when inheriting ACLs, which is what we want, as it prevents
+SGID bit clearing and the mode has been properly set by
+posix_acl_create() anyway.
+
+Fixes: 073931017b49d9458aa351605b43a7e34598caef
+CC: reiserfs-devel@vger.kernel.org
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/reiserfs/xattr_acl.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+--- a/fs/reiserfs/xattr_acl.c
++++ b/fs/reiserfs/xattr_acl.c
+@@ -37,7 +37,14 @@ reiserfs_set_acl(struct inode *inode, st
+ error = journal_begin(&th, inode->i_sb, jcreate_blocks);
+ reiserfs_write_unlock(inode->i_sb);
+ if (error == 0) {
++ if (type == ACL_TYPE_ACCESS && acl) {
++ error = posix_acl_update_mode(inode, &inode->i_mode,
++ &acl);
++ if (error)
++ goto unlock;
++ }
+ error = __reiserfs_set_acl(&th, inode, type, acl);
++unlock:
+ reiserfs_write_lock(inode->i_sb);
+ error2 = journal_end(&th);
+ reiserfs_write_unlock(inode->i_sb);
+@@ -241,11 +248,6 @@ __reiserfs_set_acl(struct reiserfs_trans
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ name = XATTR_NAME_POSIX_ACL_ACCESS;
+- if (acl) {
+- error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+- if (error)
+- return error;
+- }
+ break;
+ case ACL_TYPE_DEFAULT:
+ name = XATTR_NAME_POSIX_ACL_DEFAULT;
percpu_counter-rename-__percpu_counter_add-to-percpu_counter_add_batch.patch
writeback-rework-wb__stat-family-of-functions.patch
kernel-fork.c-virtually-mapped-stacks-do-not-disable-interrupts.patch
+acpi-nfit-fix-memory-corruption-unregister-mce-decoder-on-failure.patch
+vmbus-re-enable-channel-tasklet.patch
+cpufreq-intel_pstate-correct-the-busy-calculation-for-knl.patch
+spmi-include-of-based-modalias-in-device-uevent.patch
+reiserfs-don-t-clear-sgid-when-inheriting-acls.patch
+device-dax-fix-sysfs-duplicate-warnings.patch
+drm-imx-parallel-display-accept-drm_of_find_panel_or_bridge-failure.patch
+pm-domains-defer-dev_pm_domain_set-until-genpd-attach_dev-succeeds-if-present.patch
+tracing-fix-kmemleak-in-instance_rmdir.patch
+drm-i915-fbdev-check-for-existence-of-ifbdev-vma-before-operations.patch
+drm-i915-hold-rpm-wakelock-while-initializing-oa-buffer.patch
+drm-i915-reintroduce-vlv-chv-pfi-programming-power-domain-workaround.patch
+smp-hotplug-move-unparking-of-percpu-threads-to-the-control-cpu.patch
+smp-hotplug-replace-bug_on-and-react-useful.patch
--- /dev/null
+From 9cd4f1a4e7a858849e889a081a99adff83e08e4c Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 4 Jul 2017 22:20:23 +0200
+Subject: smp/hotplug: Move unparking of percpu threads to the control CPU
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 9cd4f1a4e7a858849e889a081a99adff83e08e4c upstream.
+
+Vikram reported the following backtrace:
+
+ BUG: scheduling while atomic: swapper/7/0/0x00000002
+ CPU: 7 PID: 0 Comm: swapper/7 Not tainted 4.9.32-perf+ #680
+ schedule
+ schedule_hrtimeout_range_clock
+ schedule_hrtimeout
+ wait_task_inactive
+ __kthread_bind_mask
+ __kthread_bind
+ __kthread_unpark
+ kthread_unpark
+ cpuhp_online_idle
+ cpu_startup_entry
+ secondary_start_kernel
+
+He analyzed correctly that a parked cpu hotplug thread of an offlined CPU
+was still on the runqueue when the CPU came back online and tried to unpark
+it. This causes the thread which invoked kthread_unpark() to call
+wait_task_inactive() and subsequently schedule() with preemption disabled.
+His proposed workaround was to "make sure" that a parked thread has
+scheduled out when the CPU goes offline, so the situation cannot happen.
+
+But that's still wrong, because the root cause is neither the fact that the
+percpu thread is still on the runqueue nor that preemption is
+disabled; the latter could simply be solved by enabling preemption before
+calling kthread_unpark().
+
+The real issue is that the calling thread is the idle task of the upcoming
+CPU, which is not supposed to call anything which might sleep. The moron,
+who wrote that code, missed completely that kthread_unpark() might end up
+in schedule().
+
+The solution is simpler than expected. The thread which controls the
+hotplug operation is waiting for the CPU to call complete() on the hotplug
+state completion. So the idle task of the upcoming CPU can set its state to
+CPUHP_AP_ONLINE_IDLE and invoke complete(). This in turn wakes the control
+task on a different CPU, which then can safely do the unpark and kick the
+now unparked hotplug thread of the upcoming CPU to complete the bringup to
+the final target state.
+
+Control CPU AP
+
+bringup_cpu();
+ __cpu_up() ------------>
+ bringup_ap();
+ bringup_wait_for_ap()
+ wait_for_completion();
+ cpuhp_online_idle();
+ <------------ complete();
+ unpark(AP->stopper);
+ unpark(AP->hotplugthread);
+ while(1)
+ do_idle();
+ kick(AP->hotplugthread);
+ wait_for_completion(); hotplug_thread()
+ run_online_callbacks();
+ complete();
+
+Fixes: 8df3e07e7f21 ("cpu/hotplug: Let upcoming cpu bring itself fully up")
+Reported-by: Vikram Mulukutla <markivx@codeaurora.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Peter Zijlstra <peterz@infradead.org>
+Cc: Sebastian Sewior <bigeasy@linutronix.de>
+Cc: Rusty Russell <rusty@rustcorp.com.au>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1707042218020.2131@nanos
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/cpu.c | 37 +++++++++++++++++++------------------
+ 1 file changed, 19 insertions(+), 18 deletions(-)
+
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -346,11 +346,25 @@ EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
+
+ /* Notifier wrappers for transitioning to state machine */
+
++static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st);
++
+ static int bringup_wait_for_ap(unsigned int cpu)
+ {
+ struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+
++ /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
+ wait_for_completion(&st->done);
++ BUG_ON(!cpu_online(cpu));
++
++ /* Unpark the stopper thread and the hotplug thread of the target cpu */
++ stop_machine_unpark(cpu);
++ kthread_unpark(st->thread);
++
++ /* Should we go further up ? */
++ if (st->target > CPUHP_AP_ONLINE_IDLE) {
++ __cpuhp_kick_ap_work(st);
++ wait_for_completion(&st->done);
++ }
+ return st->result;
+ }
+
+@@ -371,9 +385,7 @@ static int bringup_cpu(unsigned int cpu)
+ irq_unlock_sparse();
+ if (ret)
+ return ret;
+- ret = bringup_wait_for_ap(cpu);
+- BUG_ON(!cpu_online(cpu));
+- return ret;
++ return bringup_wait_for_ap(cpu);
+ }
+
+ /*
+@@ -859,31 +871,20 @@ void notify_cpu_starting(unsigned int cp
+ }
+
+ /*
+- * Called from the idle task. We need to set active here, so we can kick off
+- * the stopper thread and unpark the smpboot threads. If the target state is
+- * beyond CPUHP_AP_ONLINE_IDLE we kick cpuhp thread and let it bring up the
+- * cpu further.
++ * Called from the idle task. Wake up the controlling task which brings the
++ * stopper and the hotplug thread of the upcoming CPU up and then delegates
++ * the rest of the online bringup to the hotplug thread.
+ */
+ void cpuhp_online_idle(enum cpuhp_state state)
+ {
+ struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
+- unsigned int cpu = smp_processor_id();
+
+ /* Happens for the boot cpu */
+ if (state != CPUHP_AP_ONLINE_IDLE)
+ return;
+
+ st->state = CPUHP_AP_ONLINE_IDLE;
+-
+- /* Unpark the stopper thread and the hotplug thread of this cpu */
+- stop_machine_unpark(cpu);
+- kthread_unpark(st->thread);
+-
+- /* Should we go further up ? */
+- if (st->target > CPUHP_AP_ONLINE_IDLE)
+- __cpuhp_kick_ap_work(st);
+- else
+- complete(&st->done);
++ complete(&st->done);
+ }
+
+ /* Requires cpu_add_remove_lock to be held */
--- /dev/null
+From dea1d0f5f1284e3defee4b8484d9fc230686cd42 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 11 Jul 2017 22:06:24 +0200
+Subject: smp/hotplug: Replace BUG_ON and react useful
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit dea1d0f5f1284e3defee4b8484d9fc230686cd42 upstream.
+
+The move of the unpark functions to the control thread moved the BUG_ON()
+there as well. While it made some sense in the idle thread of the upcoming
+CPU, it's bogus to crash the control thread on the already online CPU,
+especially as the function has a return value and the callsite is prepared
+to handle an error return.
+
+Replace it with a WARN_ON_ONCE() and return a proper error code.
+
+Fixes: 9cd4f1a4e7a8 ("smp/hotplug: Move unparking of percpu threads to the control CPU")
+Rightfully-ranted-at-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/cpu.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -354,7 +354,8 @@ static int bringup_wait_for_ap(unsigned
+
+ /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
+ wait_for_completion(&st->done);
+- BUG_ON(!cpu_online(cpu));
++ if (WARN_ON_ONCE((!cpu_online(cpu))))
++ return -ECANCELED;
+
+ /* Unpark the stopper thread and the hotplug thread of the target cpu */
+ stop_machine_unpark(cpu);
--- /dev/null
+From d50daa2af2618dab6d21634e65a5fbcf4ae437d6 Mon Sep 17 00:00:00 2001
+From: Bjorn Andersson <bjorn.andersson@linaro.org>
+Date: Thu, 29 Jun 2017 14:46:44 -0700
+Subject: spmi: Include OF based modalias in device uevent
+
+From: Bjorn Andersson <bjorn.andersson@linaro.org>
+
+commit d50daa2af2618dab6d21634e65a5fbcf4ae437d6 upstream.
+
+Include the OF-based modalias in the uevent sent when registering SPMI
+devices, so that user space has a chance to autoload the kernel module
+for the device.
+
+Tested-by: Rob Clark <robdclark@gmail.com>
+Reported-by: Rob Clark <robdclark@gmail.com>
+Reviewed-by: Stephen Boyd <sboyd@codeaurora.org>
+Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/spmi/spmi.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/drivers/spmi/spmi.c
++++ b/drivers/spmi/spmi.c
+@@ -365,11 +365,23 @@ static int spmi_drv_remove(struct device
+ return 0;
+ }
+
++static int spmi_drv_uevent(struct device *dev, struct kobj_uevent_env *env)
++{
++ int ret;
++
++ ret = of_device_uevent_modalias(dev, env);
++ if (ret != -ENODEV)
++ return ret;
++
++ return 0;
++}
++
+ static struct bus_type spmi_bus_type = {
+ .name = "spmi",
+ .match = spmi_device_match,
+ .probe = spmi_drv_probe,
+ .remove = spmi_drv_remove,
++ .uevent = spmi_drv_uevent,
+ };
+
+ /**
--- /dev/null
+From db9108e054700c96322b0f0028546aa4e643cf0b Mon Sep 17 00:00:00 2001
+From: Chunyu Hu <chuhu@redhat.com>
+Date: Thu, 20 Jul 2017 18:36:09 +0800
+Subject: tracing: Fix kmemleak in instance_rmdir
+
+From: Chunyu Hu <chuhu@redhat.com>
+
+commit db9108e054700c96322b0f0028546aa4e643cf0b upstream.
+
+Hit a kmemleak warning when executing instance_rmdir: it forgot to release
+the memory of tracing_cpumask. With this fix, the warning does not appear
+any more.
+
+unreferenced object 0xffff93a8dfaa7c18 (size 8):
+ comm "mkdir", pid 1436, jiffies 4294763622 (age 9134.308s)
+ hex dump (first 8 bytes):
+ ff ff ff ff ff ff ff ff ........
+ backtrace:
+ [<ffffffff88b6567a>] kmemleak_alloc+0x4a/0xa0
+ [<ffffffff8861ea41>] __kmalloc_node+0xf1/0x280
+ [<ffffffff88b505d3>] alloc_cpumask_var_node+0x23/0x30
+ [<ffffffff88b5060e>] alloc_cpumask_var+0xe/0x10
+ [<ffffffff88571ab0>] instance_mkdir+0x90/0x240
+ [<ffffffff886e5100>] tracefs_syscall_mkdir+0x40/0x70
+ [<ffffffff886565c9>] vfs_mkdir+0x109/0x1b0
+ [<ffffffff8865b1d0>] SyS_mkdir+0xd0/0x100
+ [<ffffffff88403857>] do_syscall_64+0x67/0x150
+ [<ffffffff88b710e7>] return_from_SYSCALL_64+0x0/0x6a
+ [<ffffffffffffffff>] 0xffffffffffffffff
+
+Link: http://lkml.kernel.org/r/1500546969-12594-1-git-send-email-chuhu@redhat.com
+
+Fixes: ccfe9e42e451 ("tracing: Make tracing_cpumask available for all instances")
+Signed-off-by: Chunyu Hu <chuhu@redhat.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -7594,6 +7594,7 @@ static int instance_rmdir(const char *na
+ }
+ kfree(tr->topts);
+
++ free_cpumask_var(tr->tracing_cpumask);
+ kfree(tr->name);
+ kfree(tr);
+
--- /dev/null
+From 6463a4571ceefc43908df4b016d8d5d8b8e85357 Mon Sep 17 00:00:00 2001
+From: Stephen Hemminger <stephen@networkplumber.org>
+Date: Sun, 25 Jun 2017 12:47:46 -0700
+Subject: vmbus: re-enable channel tasklet
+
+From: Stephen Hemminger <stephen@networkplumber.org>
+
+commit 6463a4571ceefc43908df4b016d8d5d8b8e85357 upstream.
+
+This problem shows up in 4.11 when the netvsc driver is removed and
+reloaded. The problem is that the channel is closed during module removal
+and the tasklet for processing responses is disabled. When the module is
+reloaded, the channel is reopened but the tasklet is still marked as
+disabled. The fix is to re-enable the tasklet at the end of close, which
+gets it back to its initial state.
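+
+A minimal sketch of the intended pairing (simplified; the actual change is
+only the single tasklet_enable() in the hunk below):
+
+    /* in vmbus_close_internal(): */
+    tasklet_disable(&channel->callback_event);  /* quiesce response processing */
+    /* ... tear down the GPADL and free the ring buffer ... */
+    tasklet_enable(&channel->callback_event);   /* restore the disable count so
+                                                 * a later re-open works again */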
+
+The issue is less urgent in 4.12, since the network driver now uses NAPI
+rather than the tasklet, and other VMBUS devices are rarely unloaded/reloaded.
+
+Fixes: dad72a1d2844 ("vmbus: remove hv_event_tasklet_disable/enable")
+
+Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
+Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hv/channel.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -606,6 +606,8 @@ static int vmbus_close_internal(struct v
+ get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
+
+ out:
++ /* re-enable tasklet for use on re-open */
++ tasklet_enable(&channel->callback_event);
+ return ret;
+ }
+