--- /dev/null
+From 1fc2e41f7af4572b07190f9dec28396b418e9a36 Mon Sep 17 00:00:00 2001
+From: Alexander Tsoy <alexander@tsoy.me>
+Date: Mon, 22 May 2017 20:58:11 +0300
+Subject: ALSA: hda - apply STAC_9200_DELL_M22 quirk for Dell Latitude D430
+
+From: Alexander Tsoy <alexander@tsoy.me>
+
+commit 1fc2e41f7af4572b07190f9dec28396b418e9a36 upstream.
+
+This model is actually called 92XXM2-8 in Windows driver. But since pin
+configs for M22 and M28 are identical, just reuse M22 quirk.
+
+Fixes external microphone (tested) and probably docking station ports
+(not tested).
+
+Signed-off-by: Alexander Tsoy <alexander@tsoy.me>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/patch_sigmatel.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -1537,6 +1537,8 @@ static const struct snd_pci_quirk stac92
+ "Dell Inspiron 1501", STAC_9200_DELL_M26),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f6,
+ "unknown Dell", STAC_9200_DELL_M26),
++ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0201,
++ "Dell Latitude D430", STAC_9200_DELL_M22),
+ /* Panasonic */
+ SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-74", STAC_9200_PANASONIC),
+ /* Gateway machines needs EAPD to be set on resume */
--- /dev/null
+From 09be4a5219610a6fae3215d4f51f948d6f5d2609 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 11 May 2017 13:46:12 -0400
+Subject: drm/amd/powerplay/smu7: add vblank check for mclk switching (v2)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 09be4a5219610a6fae3215d4f51f948d6f5d2609 upstream.
+
+Check to make sure the vblank period is long enough to support
+mclk switching.
+
+v2: drop needless initial assignment (Nils)
+
+bug: https://bugs.freedesktop.org/show_bug.cgi?id=96868
+
+Acked-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 31 ++++++++++++++++++++---
+ 1 file changed, 27 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -2495,6 +2495,28 @@ static int smu7_get_power_state_size(str
+ return sizeof(struct smu7_power_state);
+ }
+
++static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
++ uint32_t vblank_time_us)
++{
++ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
++ uint32_t switch_limit_us;
++
++ switch (hwmgr->chip_id) {
++ case CHIP_POLARIS10:
++ case CHIP_POLARIS11:
++ case CHIP_POLARIS12:
++ switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
++ break;
++ default:
++ switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
++ break;
++ }
++
++ if (vblank_time_us < switch_limit_us)
++ return true;
++ else
++ return false;
++}
+
+ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+ struct pp_power_state *request_ps,
+@@ -2509,6 +2531,7 @@ static int smu7_apply_state_adjust_rules
+ bool disable_mclk_switching;
+ bool disable_mclk_switching_for_frame_lock;
+ struct cgs_display_info info = {0};
++ struct cgs_mode_info mode_info = {0};
+ const struct phm_clock_and_voltage_limits *max_limits;
+ uint32_t i;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+@@ -2517,6 +2540,7 @@ static int smu7_apply_state_adjust_rules
+ int32_t count;
+ int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
+
++ info.mode_info = &mode_info;
+ data->battery_state = (PP_StateUILabel_Battery ==
+ request_ps->classification.ui_label);
+
+@@ -2543,8 +2567,6 @@ static int smu7_apply_state_adjust_rules
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+
+- /*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
+-
+ minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
+ minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
+
+@@ -2609,8 +2631,9 @@ static int smu7_apply_state_adjust_rules
+ PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
+
+
+- disable_mclk_switching = (1 < info.display_count) ||
+- disable_mclk_switching_for_frame_lock;
++ disable_mclk_switching = ((1 < info.display_count) ||
++ disable_mclk_switching_for_frame_lock ||
++ smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us));
+
+ sclk = smu7_ps->performance_levels[0].engine_clock;
+ mclk = smu7_ps->performance_levels[0].memory_clock;
--- /dev/null
+From 2275a3a2fe9914ba6d76c8ea490da3c08342bd19 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 11 May 2017 13:57:41 -0400
+Subject: drm/amd/powerplay/smu7: disable mclk switching for high refresh rates
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 2275a3a2fe9914ba6d76c8ea490da3c08342bd19 upstream.
+
+Even if the vblank period would allow it, it still seems to
+be problematic on some cards.
+
+bug: https://bugs.freedesktop.org/show_bug.cgi?id=96868
+
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -2633,7 +2633,8 @@ static int smu7_apply_state_adjust_rules
+
+ disable_mclk_switching = ((1 < info.display_count) ||
+ disable_mclk_switching_for_frame_lock ||
+- smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us));
++ smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) ||
++ (mode_info.refresh_rate > 120));
+
+ sclk = smu7_ps->performance_levels[0].engine_clock;
+ mclk = smu7_ps->performance_levels[0].memory_clock;
--- /dev/null
+From 82bc9a42cf854fdf63155759c0aa790bd1f361b0 Mon Sep 17 00:00:00 2001
+From: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+Date: Tue, 18 Apr 2017 13:43:32 +0200
+Subject: drm/gma500/psb: Actually use VBT mode when it is found
+
+From: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+
+commit 82bc9a42cf854fdf63155759c0aa790bd1f361b0 upstream.
+
+With LVDS we were incorrectly picking the pre-programmed mode instead of
+the preferred mode provided by VBT. Make sure we pick the VBT mode if
+one is provided. It is likely that the mode read-out code is still wrong
+but this patch fixes the immediate problem on most machines.
+
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=78562
+Signed-off-by: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+Link: http://patchwork.freedesktop.org/patch/msgid/20170418114332.12183-1-patrik.r.jakobsson@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/gma500/psb_intel_lvds.c | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
++++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
+@@ -774,20 +774,23 @@ void psb_intel_lvds_init(struct drm_devi
+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+ mode_dev->panel_fixed_mode =
+ drm_mode_duplicate(dev, scan);
++ DRM_DEBUG_KMS("Using mode from DDC\n");
+ goto out; /* FIXME: check for quirks */
+ }
+ }
+
+ /* Failed to get EDID, what about VBT? do we need this? */
+- if (mode_dev->vbt_mode)
++ if (dev_priv->lfp_lvds_vbt_mode) {
+ mode_dev->panel_fixed_mode =
+- drm_mode_duplicate(dev, mode_dev->vbt_mode);
++ drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+
+- if (!mode_dev->panel_fixed_mode)
+- if (dev_priv->lfp_lvds_vbt_mode)
+- mode_dev->panel_fixed_mode =
+- drm_mode_duplicate(dev,
+- dev_priv->lfp_lvds_vbt_mode);
++ if (mode_dev->panel_fixed_mode) {
++ mode_dev->panel_fixed_mode->type |=
++ DRM_MODE_TYPE_PREFERRED;
++ DRM_DEBUG_KMS("Using mode from VBT\n");
++ goto out;
++ }
++ }
+
+ /*
+ * If we didn't get EDID, try checking if the panel is already turned
+@@ -804,6 +807,7 @@ void psb_intel_lvds_init(struct drm_devi
+ if (mode_dev->panel_fixed_mode) {
+ mode_dev->panel_fixed_mode->type |=
+ DRM_MODE_TYPE_PREFERRED;
++ DRM_DEBUG_KMS("Using pre-programmed mode\n");
+ goto out; /* FIXME: check for quirks */
+ }
+ }
--- /dev/null
+From 58d7e3e427db1bd68f33025519a9468140280a75 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 11 May 2017 13:14:14 -0400
+Subject: drm/radeon/ci: disable mclk switching for high refresh rates (v2)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 58d7e3e427db1bd68f33025519a9468140280a75 upstream.
+
+Even if the vblank period would allow it, it still seems to
+be problematic on some cards.
+
+v2: fix logic inversion (Nils)
+
+bug: https://bugs.freedesktop.org/show_bug.cgi?id=96868
+
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/ci_dpm.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/gpu/drm/radeon/ci_dpm.c
++++ b/drivers/gpu/drm/radeon/ci_dpm.c
+@@ -776,6 +776,12 @@ bool ci_dpm_vblank_too_short(struct rade
+ u32 vblank_time = r600_dpm_get_vblank_time(rdev);
+ u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
+
++ /* disable mclk switching if the refresh is >120Hz, even if the
++ * blanking period would allow it
++ */
++ if (r600_dpm_get_vrefresh(rdev) > 120)
++ return true;
++
+ if (vblank_time < switch_limit)
+ return true;
+ else
--- /dev/null
+From 51964e9e12d0a054002a1a0d1dec4f661c7aaf28 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michel=20D=C3=A4nzer?= <michel.daenzer@amd.com>
+Date: Mon, 30 Jan 2017 12:06:35 +0900
+Subject: drm/radeon: Fix vram_size/visible values in DRM_RADEON_GEM_INFO ioctl
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Michel Dänzer <michel.daenzer@amd.com>
+
+commit 51964e9e12d0a054002a1a0d1dec4f661c7aaf28 upstream.
+
+vram_size is supposed to be the total amount of VRAM that can be used by
+userspace, which corresponds to the TTM VRAM manager size (which is
+normally the full amount of VRAM, but can be just the visible VRAM when
+DMA can't be used for BO migration for some reason).
+
+The above was incorrectly used for vram_visible before, resulting in
+generally too large values being reported.
+
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/radeon_drv.c | 3 ++-
+ drivers/gpu/drm/radeon/radeon_gem.c | 4 ++--
+ 2 files changed, 4 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/radeon/radeon_drv.c
++++ b/drivers/gpu/drm/radeon/radeon_drv.c
+@@ -97,9 +97,10 @@
+ * 2.46.0 - Add PFP_SYNC_ME support on evergreen
+ * 2.47.0 - Add UVD_NO_OP register support
+ * 2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI
++ * 2.49.0 - DRM_RADEON_GEM_INFO ioctl returns correct vram_size/visible values
+ */
+ #define KMS_DRIVER_MAJOR 2
+-#define KMS_DRIVER_MINOR 48
++#define KMS_DRIVER_MINOR 49
+ #define KMS_DRIVER_PATCHLEVEL 0
+ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
+ int radeon_driver_unload_kms(struct drm_device *dev);
+--- a/drivers/gpu/drm/radeon/radeon_gem.c
++++ b/drivers/gpu/drm/radeon/radeon_gem.c
+@@ -220,8 +220,8 @@ int radeon_gem_info_ioctl(struct drm_dev
+
+ man = &rdev->mman.bdev.man[TTM_PL_VRAM];
+
+- args->vram_size = rdev->mc.real_vram_size;
+- args->vram_visible = (u64)man->size << PAGE_SHIFT;
++ args->vram_size = (u64)man->size << PAGE_SHIFT;
++ args->vram_visible = rdev->mc.visible_vram_size;
+ args->vram_visible -= rdev->vram_pin_size;
+ args->gart_size = rdev->mc.gtt_size;
+ args->gart_size -= rdev->gart_pin_size;
--- /dev/null
+From 3d18e33735a02b1a90aecf14410bf3edbfd4d3dc Mon Sep 17 00:00:00 2001
+From: Lyude <lyude@redhat.com>
+Date: Thu, 11 May 2017 19:31:12 -0400
+Subject: drm/radeon: Unbreak HPD handling for r600+
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Lyude <lyude@redhat.com>
+
+commit 3d18e33735a02b1a90aecf14410bf3edbfd4d3dc upstream.
+
+We end up reading the interrupt register for HPD5, and then writing it
+to HPD6 which on systems without anything using HPD5 results in
+permanently disabling hotplug on one of the display outputs after the
+first time we acknowledge a hotplug interrupt from the GPU.
+
+This code is really bad. But for now, let's just fix this. I will
+hopefully have a large patch series to refactor all of this soon.
+
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Lyude <lyude@redhat.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/cik.c | 4 ++--
+ drivers/gpu/drm/radeon/evergreen.c | 4 ++--
+ drivers/gpu/drm/radeon/r600.c | 2 +-
+ drivers/gpu/drm/radeon/si.c | 4 ++--
+ 4 files changed, 7 insertions(+), 7 deletions(-)
+
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -7416,7 +7416,7 @@ static inline void cik_irq_ack(struct ra
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
+- tmp = RREG32(DC_HPD5_INT_CONTROL);
++ tmp = RREG32(DC_HPD6_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ }
+@@ -7446,7 +7446,7 @@ static inline void cik_irq_ack(struct ra
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
+- tmp = RREG32(DC_HPD5_INT_CONTROL);
++ tmp = RREG32(DC_HPD6_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ }
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -4933,7 +4933,7 @@ static void evergreen_irq_ack(struct rad
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
+- tmp = RREG32(DC_HPD5_INT_CONTROL);
++ tmp = RREG32(DC_HPD6_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ }
+@@ -4964,7 +4964,7 @@ static void evergreen_irq_ack(struct rad
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
+- tmp = RREG32(DC_HPD5_INT_CONTROL);
++ tmp = RREG32(DC_HPD6_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ }
+--- a/drivers/gpu/drm/radeon/r600.c
++++ b/drivers/gpu/drm/radeon/r600.c
+@@ -3995,7 +3995,7 @@ static void r600_irq_ack(struct radeon_d
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
+- tmp = RREG32(DC_HPD5_INT_CONTROL);
++ tmp = RREG32(DC_HPD6_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ }
+--- a/drivers/gpu/drm/radeon/si.c
++++ b/drivers/gpu/drm/radeon/si.c
+@@ -6330,7 +6330,7 @@ static inline void si_irq_ack(struct rad
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
+- tmp = RREG32(DC_HPD5_INT_CONTROL);
++ tmp = RREG32(DC_HPD6_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ }
+@@ -6361,7 +6361,7 @@ static inline void si_irq_ack(struct rad
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
+- tmp = RREG32(DC_HPD5_INT_CONTROL);
++ tmp = RREG32(DC_HPD6_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ }
--- /dev/null
+From 2ac97f0f6654da14312d125005c77a6010e0ea38 Mon Sep 17 00:00:00 2001
+From: Jason Gerecke <killertofu@gmail.com>
+Date: Tue, 25 Apr 2017 11:29:56 -0700
+Subject: HID: wacom: Have wacom_tpc_irq guard against possible NULL dereference
+
+From: Jason Gerecke <killertofu@gmail.com>
+
+commit 2ac97f0f6654da14312d125005c77a6010e0ea38 upstream.
+
+The following Smatch complaint was generated in response to commit
+2a6cdbd ("HID: wacom: Introduce new 'touch_input' device"):
+
+ drivers/hid/wacom_wac.c:1586 wacom_tpc_irq()
+ error: we previously assumed 'wacom->touch_input' could be null (see line 1577)
+
+The 'touch_input' and 'pen_input' variables point to the 'struct input_dev'
+used for relaying touch and pen events to userspace, respectively. If a
+device does not have a touch interface or pen interface, the associated
+input variable is NULL. The 'wacom_tpc_irq()' function is responsible for
+forwarding input reports to a more-specific IRQ handler function. An
+unknown report could theoretically be mistaken as e.g. a touch report
+on a device which does not have a touch interface. This can be prevented
+by only calling the pen/touch functions when the pen/touch pointers
+are valid.
+
+Fixes: 2a6cdbd ("HID: wacom: Introduce new 'touch_input' device")
+Signed-off-by: Jason Gerecke <jason.gerecke@wacom.com>
+Reviewed-by: Ping Cheng <ping.cheng@wacom.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hid/wacom_wac.c | 47 ++++++++++++++++++++++++-----------------------
+ 1 file changed, 24 insertions(+), 23 deletions(-)
+
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -1400,37 +1400,38 @@ static int wacom_tpc_irq(struct wacom_wa
+ {
+ unsigned char *data = wacom->data;
+
+- if (wacom->pen_input)
++ if (wacom->pen_input) {
+ dev_dbg(wacom->pen_input->dev.parent,
+ "%s: received report #%d\n", __func__, data[0]);
+- else if (wacom->touch_input)
++
++ if (len == WACOM_PKGLEN_PENABLED ||
++ data[0] == WACOM_REPORT_PENABLED)
++ return wacom_tpc_pen(wacom);
++ }
++ else if (wacom->touch_input) {
+ dev_dbg(wacom->touch_input->dev.parent,
+ "%s: received report #%d\n", __func__, data[0]);
+
+- switch (len) {
+- case WACOM_PKGLEN_TPC1FG:
+- return wacom_tpc_single_touch(wacom, len);
+-
+- case WACOM_PKGLEN_TPC2FG:
+- return wacom_tpc_mt_touch(wacom);
+-
+- case WACOM_PKGLEN_PENABLED:
+- return wacom_tpc_pen(wacom);
+-
+- default:
+- switch (data[0]) {
+- case WACOM_REPORT_TPC1FG:
+- case WACOM_REPORT_TPCHID:
+- case WACOM_REPORT_TPCST:
+- case WACOM_REPORT_TPC1FGE:
++ switch (len) {
++ case WACOM_PKGLEN_TPC1FG:
+ return wacom_tpc_single_touch(wacom, len);
+
+- case WACOM_REPORT_TPCMT:
+- case WACOM_REPORT_TPCMT2:
+- return wacom_mt_touch(wacom);
++ case WACOM_PKGLEN_TPC2FG:
++ return wacom_tpc_mt_touch(wacom);
+
+- case WACOM_REPORT_PENABLED:
+- return wacom_tpc_pen(wacom);
++ default:
++ switch (data[0]) {
++ case WACOM_REPORT_TPC1FG:
++ case WACOM_REPORT_TPCHID:
++ case WACOM_REPORT_TPCST:
++ case WACOM_REPORT_TPC1FGE:
++ return wacom_tpc_single_touch(wacom, len);
++
++ case WACOM_REPORT_TPCMT:
++ case WACOM_REPORT_TPCMT2:
++ return wacom_mt_touch(wacom);
++
++ }
+ }
+ }
+
--- /dev/null
+From 98883f1b5415ea9dce60d5178877d15f4faa10b8 Mon Sep 17 00:00:00 2001
+From: "Bryant G. Ly" <bryantly@linux.vnet.ibm.com>
+Date: Tue, 9 May 2017 11:50:26 -0500
+Subject: ibmvscsis: Clear left-over abort_cmd pointers
+
+From: Bryant G. Ly <bryantly@linux.vnet.ibm.com>
+
+commit 98883f1b5415ea9dce60d5178877d15f4faa10b8 upstream.
+
+With the addition of ibmvscsis->abort_cmd pointer within
+commit 25e78531268e ("ibmvscsis: Do not send aborted task response"),
+make sure to explicitly NULL these pointers when clearing
+DELAY_SEND flag.
+
+Do this for two cases, when getting the new ibmvscsis
+descriptor in ibmvscsis_get_free_cmd() and before posting
+the response completion in ibmvscsis_send_messages().
+
+Signed-off-by: Bryant G. Ly <bryantly@linux.vnet.ibm.com>
+Reviewed-by: Michael Cyr <mikecyr@linux.vnet.ibm.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
++++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+@@ -1169,6 +1169,8 @@ static struct ibmvscsis_cmd *ibmvscsis_g
+ cmd = list_first_entry_or_null(&vscsi->free_cmd,
+ struct ibmvscsis_cmd, list);
+ if (cmd) {
++ if (cmd->abort_cmd)
++ cmd->abort_cmd = NULL;
+ cmd->flags &= ~(DELAY_SEND);
+ list_del(&cmd->list);
+ cmd->iue = iue;
+@@ -1773,6 +1775,7 @@ static void ibmvscsis_send_messages(stru
+ if (cmd->abort_cmd) {
+ retry = true;
+ cmd->abort_cmd->flags &= ~(DELAY_SEND);
++ cmd->abort_cmd = NULL;
+ }
+
+ /*
--- /dev/null
+From 75dbf2d36f6b122ad3c1070fe4bf95f71bbff321 Mon Sep 17 00:00:00 2001
+From: "Bryant G. Ly" <bryantly@linux.vnet.ibm.com>
+Date: Wed, 10 May 2017 14:35:47 -0500
+Subject: ibmvscsis: Fix the incorrect req_lim_delta
+
+From: Bryant G. Ly <bryantly@linux.vnet.ibm.com>
+
+commit 75dbf2d36f6b122ad3c1070fe4bf95f71bbff321 upstream.
+
+The current code is not correctly calculating the req_lim_delta.
+
+We want to make sure vscsi->credit is always incremented when
+we do not send a response for the scsi op. Thus for the case where
+there is a successfully aborted task we need to make sure the
+vscsi->credit is incremented.
+
+v2 - Moves the original location of the vscsi->credit increment
+to a better spot. Since if we increment credit, the next command
+we send back will have increased req_lim_delta. But we probably
+shouldn't be doing that until the aborted cmd is actually released.
+Otherwise the client will think that it can send a new command, and
+we could find ourselves short of command elements. Not likely, but could
+happen.
+
+This patch depends on both:
+commit 25e78531268e ("ibmvscsis: Do not send aborted task response")
+commit 98883f1b5415 ("ibmvscsis: Clear left-over abort_cmd pointers")
+
+Signed-off-by: Bryant G. Ly <bryantly@linux.vnet.ibm.com>
+Reviewed-by: Michael Cyr <mikecyr@linux.vnet.ibm.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 24 ++++++++++++++++++++----
+ 1 file changed, 20 insertions(+), 4 deletions(-)
+
+--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
++++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+@@ -1790,6 +1790,25 @@ static void ibmvscsis_send_messages(stru
+ list_del(&cmd->list);
+ ibmvscsis_free_cmd_resources(vscsi,
+ cmd);
++ /*
++ * With a successfully aborted op
++ * through LIO we want to increment the
++ * the vscsi credit so that when we dont
++ * send a rsp to the original scsi abort
++ * op (h_send_crq), but the tm rsp to
++ * the abort is sent, the credit is
++ * correctly sent with the abort tm rsp.
++ * We would need 1 for the abort tm rsp
++ * and 1 credit for the aborted scsi op.
++ * Thus we need to increment here.
++ * Also we want to increment the credit
++ * here because we want to make sure
++ * cmd is actually released first
++ * otherwise the client will think it
++ * it can send a new cmd, and we could
++ * find ourselves short of cmd elements.
++ */
++ vscsi->credit += 1;
+ } else {
+ iue = cmd->iue;
+
+@@ -2964,10 +2983,7 @@ static long srp_build_response(struct sc
+
+ rsp->opcode = SRP_RSP;
+
+- if (vscsi->credit > 0 && vscsi->state == SRP_PROCESSING)
+- rsp->req_lim_delta = cpu_to_be32(vscsi->credit);
+- else
+- rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
++ rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
+ rsp->tag = cmd->rsp.tag;
+ rsp->flags = 0;
+
--- /dev/null
+From 5e0cf5e6c43b9e19fc0284f69e5cd2b4a47523b0 Mon Sep 17 00:00:00 2001
+From: Jiang Yi <jiangyilism@gmail.com>
+Date: Tue, 16 May 2017 17:57:55 +0800
+Subject: iscsi-target: Always wait for kthread_should_stop() before kthread exit
+
+From: Jiang Yi <jiangyilism@gmail.com>
+
+commit 5e0cf5e6c43b9e19fc0284f69e5cd2b4a47523b0 upstream.
+
+There are three timing problems in the kthread usages of iscsi_target_mod:
+
+ - np_thread of struct iscsi_np
+ - rx_thread and tx_thread of struct iscsi_conn
+
+In iscsit_close_connection(), it calls
+
+ send_sig(SIGINT, conn->tx_thread, 1);
+ kthread_stop(conn->tx_thread);
+
+In conn->tx_thread, which is iscsi_target_tx_thread(), when it receive
+SIGINT the kthread will exit without checking the return value of
+kthread_should_stop().
+
+So if iscsi_target_tx_thread() exit right between send_sig(SIGINT...)
+and kthread_stop(...), the kthread_stop() will try to stop an already
+stopped kthread.
+
+This is invalid according to the documentation of kthread_stop().
+
+(Fix -ECONNRESET logout handling in iscsi_target_tx_thread and
+ early iscsi_target_rx_thread failure case - nab)
+
+Signed-off-by: Jiang Yi <jiangyilism@gmail.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/target/iscsi/iscsi_target.c | 30 ++++++++++++++++++++++++------
+ drivers/target/iscsi/iscsi_target_erl0.c | 6 +++++-
+ drivers/target/iscsi/iscsi_target_erl0.h | 2 +-
+ drivers/target/iscsi/iscsi_target_login.c | 4 ++++
+ 4 files changed, 34 insertions(+), 8 deletions(-)
+
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -3798,6 +3798,8 @@ int iscsi_target_tx_thread(void *arg)
+ {
+ int ret = 0;
+ struct iscsi_conn *conn = arg;
++ bool conn_freed = false;
++
+ /*
+ * Allow ourselves to be interrupted by SIGINT so that a
+ * connection recovery / failure event can be triggered externally.
+@@ -3823,12 +3825,14 @@ get_immediate:
+ goto transport_err;
+
+ ret = iscsit_handle_response_queue(conn);
+- if (ret == 1)
++ if (ret == 1) {
+ goto get_immediate;
+- else if (ret == -ECONNRESET)
++ } else if (ret == -ECONNRESET) {
++ conn_freed = true;
+ goto out;
+- else if (ret < 0)
++ } else if (ret < 0) {
+ goto transport_err;
++ }
+ }
+
+ transport_err:
+@@ -3838,8 +3842,13 @@ transport_err:
+ * responsible for cleaning up the early connection failure.
+ */
+ if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
+- iscsit_take_action_for_connection_exit(conn);
++ iscsit_take_action_for_connection_exit(conn, &conn_freed);
+ out:
++ if (!conn_freed) {
++ while (!kthread_should_stop()) {
++ msleep(100);
++ }
++ }
+ return 0;
+ }
+
+@@ -4012,6 +4021,7 @@ int iscsi_target_rx_thread(void *arg)
+ {
+ int rc;
+ struct iscsi_conn *conn = arg;
++ bool conn_freed = false;
+
+ /*
+ * Allow ourselves to be interrupted by SIGINT so that a
+@@ -4024,7 +4034,7 @@ int iscsi_target_rx_thread(void *arg)
+ */
+ rc = wait_for_completion_interruptible(&conn->rx_login_comp);
+ if (rc < 0 || iscsi_target_check_conn_state(conn))
+- return 0;
++ goto out;
+
+ if (!conn->conn_transport->iscsit_get_rx_pdu)
+ return 0;
+@@ -4033,7 +4043,15 @@ int iscsi_target_rx_thread(void *arg)
+
+ if (!signal_pending(current))
+ atomic_set(&conn->transport_failed, 1);
+- iscsit_take_action_for_connection_exit(conn);
++ iscsit_take_action_for_connection_exit(conn, &conn_freed);
++
++out:
++ if (!conn_freed) {
++ while (!kthread_should_stop()) {
++ msleep(100);
++ }
++ }
++
+ return 0;
+ }
+
+--- a/drivers/target/iscsi/iscsi_target_erl0.c
++++ b/drivers/target/iscsi/iscsi_target_erl0.c
+@@ -930,8 +930,10 @@ static void iscsit_handle_connection_cle
+ }
+ }
+
+-void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
++void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn, bool *conn_freed)
+ {
++ *conn_freed = false;
++
+ spin_lock_bh(&conn->state_lock);
+ if (atomic_read(&conn->connection_exit)) {
+ spin_unlock_bh(&conn->state_lock);
+@@ -942,6 +944,7 @@ void iscsit_take_action_for_connection_e
+ if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
+ spin_unlock_bh(&conn->state_lock);
+ iscsit_close_connection(conn);
++ *conn_freed = true;
+ return;
+ }
+
+@@ -955,4 +958,5 @@ void iscsit_take_action_for_connection_e
+ spin_unlock_bh(&conn->state_lock);
+
+ iscsit_handle_connection_cleanup(conn);
++ *conn_freed = true;
+ }
+--- a/drivers/target/iscsi/iscsi_target_erl0.h
++++ b/drivers/target/iscsi/iscsi_target_erl0.h
+@@ -9,6 +9,6 @@ extern int iscsit_stop_time2retain_timer
+ extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *);
+ extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
+ extern void iscsit_fall_back_to_erl0(struct iscsi_session *);
+-extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *);
++extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *, bool *);
+
+ #endif /*** ISCSI_TARGET_ERL0_H ***/
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -1460,5 +1460,9 @@ int iscsi_target_login_thread(void *arg)
+ break;
+ }
+
++ while (!kthread_should_stop()) {
++ msleep(100);
++ }
++
+ return 0;
+ }
--- /dev/null
+From a7306c3436e9c8e584a4b9fad5f3dc91be2a6076 Mon Sep 17 00:00:00 2001
+From: Andrea Arcangeli <aarcange@redhat.com>
+Date: Fri, 2 Jun 2017 14:46:11 -0700
+Subject: ksm: prevent crash after write_protect_page fails
+
+From: Andrea Arcangeli <aarcange@redhat.com>
+
+commit a7306c3436e9c8e584a4b9fad5f3dc91be2a6076 upstream.
+
+"err" needs to be left set to -EFAULT if split_huge_page succeeds.
+Otherwise if "err" gets clobbered with zero and write_protect_page
+fails, try_to_merge_one_page() will succeed instead of returning -EFAULT
+and then try_to_merge_with_ksm_page() will continue thinking kpage is a
+PageKsm when in fact it's still an anonymous page. Eventually it'll
+crash in page_add_anon_rmap.
+
+This has been reproduced on Fedora25 kernel but I can reproduce with
+upstream too.
+
+The bug was introduced in commit f765f540598a ("ksm: prepare to new THP
+semantics") introduced in v4.5.
+
+ page:fffff67546ce1cc0 count:4 mapcount:2 mapping:ffffa094551e36e1 index:0x7f0f46673
+ flags: 0x2ffffc0004007c(referenced|uptodate|dirty|lru|active|swapbacked)
+ page dumped because: VM_BUG_ON_PAGE(!PageLocked(page))
+ page->mem_cgroup:ffffa09674bf0000
+ ------------[ cut here ]------------
+ kernel BUG at mm/rmap.c:1222!
+ CPU: 1 PID: 76 Comm: ksmd Not tainted 4.9.3-200.fc25.x86_64 #1
+ RIP: do_page_add_anon_rmap+0x1c4/0x240
+ Call Trace:
+ page_add_anon_rmap+0x18/0x20
+ try_to_merge_with_ksm_page+0x50b/0x780
+ ksm_scan_thread+0x1211/0x1410
+ ? prepare_to_wait_event+0x100/0x100
+ ? try_to_merge_with_ksm_page+0x780/0x780
+ kthread+0xd9/0xf0
+ ? kthread_park+0x60/0x60
+ ret_from_fork+0x25/0x30
+
+Fixes: f765f54059 ("ksm: prepare to new THP semantics")
+Link: http://lkml.kernel.org/r/20170513131040.21732-1-aarcange@redhat.com
+Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
+Reported-by: Federico Simoncelli <fsimonce@redhat.com>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Hugh Dickins <hughd@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/ksm.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -1002,8 +1002,7 @@ static int try_to_merge_one_page(struct
+ goto out;
+
+ if (PageTransCompound(page)) {
+- err = split_huge_page(page);
+- if (err)
++ if (split_huge_page(page))
+ goto out_unlock;
+ }
+
--- /dev/null
+From aa2efd5ea4041754da4046c3d2e7edaac9526258 Mon Sep 17 00:00:00 2001
+From: Daniel Thompson <daniel.thompson@linaro.org>
+Date: Tue, 24 Jan 2017 15:18:02 -0800
+Subject: mm/slub.c: trace free objects at KERN_INFO
+
+From: Daniel Thompson <daniel.thompson@linaro.org>
+
+commit aa2efd5ea4041754da4046c3d2e7edaac9526258 upstream.
+
+Currently when trace is enabled (e.g. slub_debug=T,kmalloc-128 ) the
+trace messages are mostly output at KERN_INFO. However the trace code
+also calls print_section() to hexdump the head of a free object. This
+is hard coded to use KERN_ERR, meaning the console is deluged with trace
+messages even if we've asked for quiet.
+
+Fix this the obvious way but adding a level parameter to
+print_section(), allowing calls from the trace code to use the same
+trace level as other trace messages.
+
+Link: http://lkml.kernel.org/r/20170113154850.518-1-daniel.thompson@linaro.org
+Signed-off-by: Daniel Thompson <daniel.thompson@linaro.org>
+Acked-by: Christoph Lameter <cl@linux.com>
+Acked-by: David Rientjes <rientjes@google.com>
+Cc: Pekka Enberg <penberg@kernel.org>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/slub.c | 23 +++++++++++++----------
+ 1 file changed, 13 insertions(+), 10 deletions(-)
+
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -496,10 +496,11 @@ static inline int check_valid_pointer(st
+ return 1;
+ }
+
+-static void print_section(char *text, u8 *addr, unsigned int length)
++static void print_section(char *level, char *text, u8 *addr,
++ unsigned int length)
+ {
+ metadata_access_enable();
+- print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
++ print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
+ length, 1);
+ metadata_access_disable();
+ }
+@@ -636,14 +637,15 @@ static void print_trailer(struct kmem_ca
+ p, p - addr, get_freepointer(s, p));
+
+ if (s->flags & SLAB_RED_ZONE)
+- print_section("Redzone ", p - s->red_left_pad, s->red_left_pad);
++ print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
++ s->red_left_pad);
+ else if (p > addr + 16)
+- print_section("Bytes b4 ", p - 16, 16);
++ print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
+
+- print_section("Object ", p, min_t(unsigned long, s->object_size,
+- PAGE_SIZE));
++ print_section(KERN_ERR, "Object ", p,
++ min_t(unsigned long, s->object_size, PAGE_SIZE));
+ if (s->flags & SLAB_RED_ZONE)
+- print_section("Redzone ", p + s->object_size,
++ print_section(KERN_ERR, "Redzone ", p + s->object_size,
+ s->inuse - s->object_size);
+
+ if (s->offset)
+@@ -658,7 +660,8 @@ static void print_trailer(struct kmem_ca
+
+ if (off != size_from_object(s))
+ /* Beginning of the filler is the free pointer */
+- print_section("Padding ", p + off, size_from_object(s) - off);
++ print_section(KERN_ERR, "Padding ", p + off,
++ size_from_object(s) - off);
+
+ dump_stack();
+ }
+@@ -820,7 +823,7 @@ static int slab_pad_check(struct kmem_ca
+ end--;
+
+ slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
+- print_section("Padding ", end - remainder, remainder);
++ print_section(KERN_ERR, "Padding ", end - remainder, remainder);
+
+ restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
+ return 0;
+@@ -973,7 +976,7 @@ static void trace(struct kmem_cache *s,
+ page->freelist);
+
+ if (!alloc)
+- print_section("Object ", (void *)object,
++ print_section(KERN_INFO, "Object ", (void *)object,
+ s->object_size);
+
+ dump_stack();
--- /dev/null
+From 986f75c876dbafed98eba7cb516c5118f155db23 Mon Sep 17 00:00:00 2001
+From: Ming Lei <ming.lei@redhat.com>
+Date: Mon, 22 May 2017 23:05:04 +0800
+Subject: nvme: avoid to use blk_mq_abort_requeue_list()
+
+From: Ming Lei <ming.lei@redhat.com>
+
+commit 986f75c876dbafed98eba7cb516c5118f155db23 upstream.
+
+NVMe may add request into requeue list simply and not kick off the
+requeue if hw queues are stopped. Then blk_mq_abort_requeue_list()
+is called in both nvme_kill_queues() and nvme_ns_remove() for
+dealing with this issue.
+
+Unfortunately blk_mq_abort_requeue_list() is absolutely a
+race maker, for example, one request may be requeued during
+the aborting. So this patch just calls blk_mq_kick_requeue_list() in
+nvme_kill_queues() to handle this issue like what nvme_start_queues()
+does. Now all requests in requeue list when queues are stopped will be
+handled by blk_mq_kick_requeue_list() when queues are restarted, either
+in nvme_start_queues() or in nvme_kill_queues().
+
+Reported-by: Zhang Yi <yizhan@redhat.com>
+Reviewed-by: Keith Busch <keith.busch@intel.com>
+Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/nvme/host/core.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1725,7 +1725,6 @@ static void nvme_ns_remove(struct nvme_n
+ sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
+ &nvme_ns_attr_group);
+ del_gendisk(ns->disk);
+- blk_mq_abort_requeue_list(ns->queue);
+ blk_cleanup_queue(ns->queue);
+ }
+
+@@ -2048,7 +2047,6 @@ void nvme_kill_queues(struct nvme_ctrl *
+ continue;
+ revalidate_disk(ns->disk);
+ blk_set_queue_dying(ns->queue);
+- blk_mq_abort_requeue_list(ns->queue);
+
+ /*
+ * Forcibly start all queues to avoid having stuck requests.
+@@ -2056,6 +2054,9 @@ void nvme_kill_queues(struct nvme_ctrl *
+ * when the final removal happens.
+ */
+ blk_mq_start_hw_queues(ns->queue);
++
++ /* draining requests in requeue list */
++ blk_mq_kick_requeue_list(ns->queue);
+ }
+ mutex_unlock(&ctrl->namespaces_mutex);
+ }
--- /dev/null
+From 0544f5494a03b8846db74e02be5685d1f32b06c9 Mon Sep 17 00:00:00 2001
+From: Marta Rybczynska <mrybczyn@kalray.eu>
+Date: Mon, 10 Apr 2017 17:12:34 +0200
+Subject: nvme-rdma: support devices with queue size < 32
+
+From: Marta Rybczynska <mrybczyn@kalray.eu>
+
+commit 0544f5494a03b8846db74e02be5685d1f32b06c9 upstream.
+
+In the case of small NVMe-oF queue size (<32) we may enter a deadlock
+caused by the fact that the IB completions aren't sent waiting for 32
+and the send queue will fill up.
+
+The error is seen as (using mlx5):
+[ 2048.693355] mlx5_0:mlx5_ib_post_send:3765:(pid 7273):
+[ 2048.693360] nvme nvme1: nvme_rdma_post_send failed with error code -12
+
+This patch changes the way the signaling is done so that it depends on
+the queue depth now. The magic define has been removed completely.
+
+Signed-off-by: Marta Rybczynska <marta.rybczynska@kalray.eu>
+Signed-off-by: Samuel Jones <sjones@kalray.eu>
+Acked-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/nvme/host/rdma.c | 18 ++++++++++++++----
+ 1 file changed, 14 insertions(+), 4 deletions(-)
+
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -1011,6 +1011,19 @@ static void nvme_rdma_send_done(struct i
+ nvme_rdma_wr_error(cq, wc, "SEND");
+ }
+
++static inline int nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue)
++{
++ int sig_limit;
++
++ /*
++ * We signal completion every queue depth/2 and also handle the
++ * degenerated case of a device with queue_depth=1, where we
++ * would need to signal every message.
++ */
++ sig_limit = max(queue->queue_size / 2, 1);
++ return (++queue->sig_count % sig_limit) == 0;
++}
++
+ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
+ struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
+ struct ib_send_wr *first, bool flush)
+@@ -1038,9 +1051,6 @@ static int nvme_rdma_post_send(struct nv
+ * Would have been way to obvious to handle this in hardware or
+ * at least the RDMA stack..
+ *
+- * This messy and racy code sniplet is copy and pasted from the iSER
+- * initiator, and the magic '32' comes from there as well.
+- *
+ * Always signal the flushes. The magic request used for the flush
+ * sequencer is not allocated in our driver's tagset and it's
+ * triggered to be freed by blk_cleanup_queue(). So we need to
+@@ -1048,7 +1058,7 @@ static int nvme_rdma_post_send(struct nv
+ * embeded in request's payload, is not freed when __ib_process_cq()
+ * calls wr_cqe->done().
+ */
+- if ((++queue->sig_count % 32) == 0 || flush)
++ if (nvme_rdma_queue_sig_limit(queue) || flush)
+ wr.send_flags |= IB_SEND_SIGNALED;
+
+ if (first)
--- /dev/null
+From 806f026f9b901eaf1a6baeb48b5da18d6a4f818e Mon Sep 17 00:00:00 2001
+From: Ming Lei <ming.lei@redhat.com>
+Date: Mon, 22 May 2017 23:05:03 +0800
+Subject: nvme: use blk_mq_start_hw_queues() in nvme_kill_queues()
+
+From: Ming Lei <ming.lei@redhat.com>
+
+commit 806f026f9b901eaf1a6baeb48b5da18d6a4f818e upstream.
+
+Inside nvme_kill_queues(), we have to start hw queues for
+draining requests in sw queues, .dispatch list and requeue list,
+so use blk_mq_start_hw_queues() instead of blk_mq_start_stopped_hw_queues()
+which only run queues if queues are stopped, but the queues may have
+been started already, for example nvme_start_queues() is called in reset work
+function.
+
+blk_mq_start_hw_queues() run hw queues in current context, instead
+of running asynchronously like before. Given nvme_kill_queues() is
+run from either remove context or reset worker context, both are fine
+to run hw queue directly. And the mutex of namespaces_mutex isn't a
+problem too becasue nvme_start_freeze() runs hw queue in this way
+already.
+
+Reported-by: Zhang Yi <yizhan@redhat.com>
+Reviewed-by: Keith Busch <keith.busch@intel.com>
+Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/nvme/host/core.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -2049,7 +2049,13 @@ void nvme_kill_queues(struct nvme_ctrl *
+ revalidate_disk(ns->disk);
+ blk_set_queue_dying(ns->queue);
+ blk_mq_abort_requeue_list(ns->queue);
+- blk_mq_start_stopped_hw_queues(ns->queue, true);
++
++ /*
++ * Forcibly start all queues to avoid having stuck requests.
++ * Note that we must ensure the queues are not stopped
++ * when the final removal happens.
++ */
++ blk_mq_start_hw_queues(ns->queue);
+ }
+ mutex_unlock(&ctrl->namespaces_mutex);
+ }
--- /dev/null
+From ff5a20169b98d84ad8d7f99f27c5ebbb008204d6 Mon Sep 17 00:00:00 2001
+From: Nicolas Iooss <nicolas.iooss_linux@m4x.org>
+Date: Fri, 2 Jun 2017 14:46:28 -0700
+Subject: pcmcia: remove left-over %Z format
+
+From: Nicolas Iooss <nicolas.iooss_linux@m4x.org>
+
+commit ff5a20169b98d84ad8d7f99f27c5ebbb008204d6 upstream.
+
+Commit 5b5e0928f742 ("lib/vsprintf.c: remove %Z support") removed some
+usages of format %Z but forgot "%.2Zx". This makes clang 4.0 reports a
+-Wformat-extra-args warning because it does not know about %Z.
+
+Replace %Z with %z.
+
+Link: http://lkml.kernel.org/r/20170520090946.22562-1-nicolas.iooss_linux@m4x.org
+Signed-off-by: Nicolas Iooss <nicolas.iooss_linux@m4x.org>
+Cc: Harald Welte <laforge@gnumonks.org>
+Cc: Alexey Dobriyan <adobriyan@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/pcmcia/cm4040_cs.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/char/pcmcia/cm4040_cs.c
++++ b/drivers/char/pcmcia/cm4040_cs.c
+@@ -374,7 +374,7 @@ static ssize_t cm4040_write(struct file
+
+ rc = write_sync_reg(SCR_HOST_TO_READER_START, dev);
+ if (rc <= 0) {
+- DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc);
++ DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
+ DEBUGP(2, dev, "<- cm4040_write (failed)\n");
+ if (rc == -ERESTARTSYS)
+ return rc;
+@@ -387,7 +387,7 @@ static ssize_t cm4040_write(struct file
+ for (i = 0; i < bytes_to_write; i++) {
+ rc = wait_for_bulk_out_ready(dev);
+ if (rc <= 0) {
+- DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2Zx\n",
++ DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2zx\n",
+ rc);
+ DEBUGP(2, dev, "<- cm4040_write (failed)\n");
+ if (rc == -ERESTARTSYS)
+@@ -403,7 +403,7 @@ static ssize_t cm4040_write(struct file
+ rc = write_sync_reg(SCR_HOST_TO_READER_DONE, dev);
+
+ if (rc <= 0) {
+- DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc);
++ DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
+ DEBUGP(2, dev, "<- cm4040_write (failed)\n");
+ if (rc == -ERESTARTSYS)
+ return rc;
--- /dev/null
+From f2e767bb5d6ee0d988cb7d4e54b0b21175802b6b Mon Sep 17 00:00:00 2001
+From: Ram Pai <linuxram@us.ibm.com>
+Date: Thu, 26 Jan 2017 16:37:01 -0200
+Subject: scsi: mpt3sas: Force request partial completion alignment
+
+From: Ram Pai <linuxram@us.ibm.com>
+
+commit f2e767bb5d6ee0d988cb7d4e54b0b21175802b6b upstream.
+
+The firmware or device, possibly under a heavy I/O load, can return on a
+partial unaligned boundary. Scsi-ml expects these requests to be
+completed on an alignment boundary. Scsi-ml blindly requeues the I/O
+without checking the alignment boundary of the I/O request for the
+remaining bytes. This leads to errors, since devices cannot perform
+non-aligned read/write operations.
+
+This patch fixes the issue in the driver. It aligns unaligned
+completions of FS requests, by truncating them to the nearest alignment
+boundary.
+
+[mkp: simplified if statement]
+
+Reported-by: Mauricio Faria De Oliveira <mauricfo@linux.vnet.ibm.com>
+Signed-off-by: Guilherme G. Piccoli <gpiccoli@linux.vnet.ibm.com>
+Signed-off-by: Ram Pai <linuxram@us.ibm.com>
+Acked-by: Sreekanth Reddy <Sreekanth.Reddy@broadcom.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/mpt3sas/mpt3sas_scsih.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+@@ -4634,6 +4634,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *i
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ u32 response_code = 0;
+ unsigned long flags;
++ unsigned int sector_sz;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
+@@ -4692,6 +4693,20 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *i
+ }
+
+ xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
++
++ /* In case of bogus fw or device, we could end up having
++ * unaligned partial completion. We can force alignment here,
++ * then scsi-ml does not need to handle this misbehavior.
++ */
++ sector_sz = scmd->device->sector_size;
++ if (unlikely(scmd->request->cmd_type == REQ_TYPE_FS && sector_sz &&
++ xfer_cnt % sector_sz)) {
++ sdev_printk(KERN_INFO, scmd->device,
++ "unaligned partial completion avoided (xfer_cnt=%u, sector_sz=%u)\n",
++ xfer_cnt, sector_sz);
++ xfer_cnt = round_down(xfer_cnt, sector_sz);
++ }
++
+ scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
+ if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
+ log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
drivers-tty-8250-only-call-fintek_8250_probe-when-doing-port-i-o.patch
i2c-i2c-tiny-usb-fix-buffer-not-being-dma-capable.patch
crypto-skcipher-add-missing-api-setkey-checks.patch
+x86-mce-export-memory_error.patch
acpi-nfit-fix-the-memory-error-check-in-nfit_handle_mce.patch
revert-acpi-button-change-default-behavior-to-lid_init_state-open.patch
mmc-sdhci-iproc-suppress-spurious-interrupt-with-multiblock-read.patch
+iscsi-target-always-wait-for-kthread_should_stop-before-kthread-exit.patch
+ibmvscsis-clear-left-over-abort_cmd-pointers.patch
+ibmvscsis-fix-the-incorrect-req_lim_delta.patch
+hid-wacom-have-wacom_tpc_irq-guard-against-possible-null-dereference.patch
+nvme-rdma-support-devices-with-queue-size-32.patch
+nvme-use-blk_mq_start_hw_queues-in-nvme_kill_queues.patch
+nvme-avoid-to-use-blk_mq_abort_requeue_list.patch
+scsi-mpt3sas-force-request-partial-completion-alignment.patch
+drm-amd-powerplay-smu7-add-vblank-check-for-mclk-switching-v2.patch
+drm-amd-powerplay-smu7-disable-mclk-switching-for-high-refresh-rates.patch
+drm-radeon-ci-disable-mclk-switching-for-high-refresh-rates-v2.patch
+drm-radeon-unbreak-hpd-handling-for-r600.patch
+drm-radeon-fix-vram_size-visible-values-in-drm_radeon_gem_info-ioctl.patch
+pcmcia-remove-left-over-z-format.patch
+alsa-hda-apply-stac_9200_dell_m22-quirk-for-dell-latitude-d430.patch
+x86-pat-fix-xorg-regression-on-cpus-that-don-t-support-pat.patch
+x86-boot-use-cross_compile-prefix-for-readelf.patch
+ksm-prevent-crash-after-write_protect_page-fails.patch
+slub-memcg-cure-the-brainless-abuse-of-sysfs-attributes.patch
+mm-slub.c-trace-free-objects-at-kern_info.patch
+drm-gma500-psb-actually-use-vbt-mode-when-it-is-found.patch
--- /dev/null
+From 478fe3037b2278d276d4cd9cd0ab06c4cb2e9b32 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 2 Jun 2017 14:46:25 -0700
+Subject: slub/memcg: cure the brainless abuse of sysfs attributes
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 478fe3037b2278d276d4cd9cd0ab06c4cb2e9b32 upstream.
+
+memcg_propagate_slab_attrs() abuses the sysfs attribute file functions
+to propagate settings from the root kmem_cache to a newly created
+kmem_cache. It does that with:
+
+ attr->show(root, buf);
+ attr->store(new, buf, strlen(bug);
+
+Aside of being a lazy and absurd hackery this is broken because it does
+not check the return value of the show() function.
+
+Some of the show() functions return 0 w/o touching the buffer. That
+means in such a case the store function is called with the stale content
+of the previous show(). That causes nonsense like invoking
+kmem_cache_shrink() on a newly created kmem_cache. In the worst case it
+would cause handing in an uninitialized buffer.
+
+This should be rewritten proper by adding a propagate() callback to
+those slub_attributes which must be propagated and avoid that insane
+conversion to and from ASCII, but that's too large for a hot fix.
+
+Check at least the return value of the show() function, so calling
+store() with stale content is prevented.
+
+Steven said:
+ "It can cause a deadlock with get_online_cpus() that has been uncovered
+ by recent cpu hotplug and lockdep changes that Thomas and Peter have
+ been doing.
+
+ Possible unsafe locking scenario:
+
+ CPU0 CPU1
+ ---- ----
+ lock(cpu_hotplug.lock);
+ lock(slab_mutex);
+ lock(cpu_hotplug.lock);
+ lock(slab_mutex);
+
+ *** DEADLOCK ***"
+
+Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1705201244540.2255@nanos
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reported-by: Steven Rostedt <rostedt@goodmis.org>
+Acked-by: David Rientjes <rientjes@google.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Michal Hocko <mhocko@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Christoph Lameter <cl@linux.com>
+Cc: Pekka Enberg <penberg@kernel.org>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Cc: Christoph Hellwig <hch@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/slub.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -5452,6 +5452,7 @@ static void memcg_propagate_slab_attrs(s
+ char mbuf[64];
+ char *buf;
+ struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);
++ ssize_t len;
+
+ if (!attr || !attr->store || !attr->show)
+ continue;
+@@ -5476,8 +5477,9 @@ static void memcg_propagate_slab_attrs(s
+ buf = buffer;
+ }
+
+- attr->show(root_cache, buf);
+- attr->store(s, buf, strlen(buf));
++ len = attr->show(root_cache, buf);
++ if (len > 0)
++ attr->store(s, buf, len);
+ }
+
+ if (buffer)
--- /dev/null
+From 3780578761921f094179c6289072a74b2228c602 Mon Sep 17 00:00:00 2001
+From: Rob Landley <rob@landley.net>
+Date: Sat, 20 May 2017 15:03:29 -0500
+Subject: x86/boot: Use CROSS_COMPILE prefix for readelf
+
+From: Rob Landley <rob@landley.net>
+
+commit 3780578761921f094179c6289072a74b2228c602 upstream.
+
+The boot code Makefile contains a straight 'readelf' invocation. This
+causes build warnings in cross compile environments, when there is no
+unprefixed readelf accessible via $PATH.
+
+Add the missing $(CROSS_COMPILE) prefix.
+
+[ tglx: Rewrote changelog ]
+
+Fixes: 98f78525371b ("x86/boot: Refuse to build with data relocations")
+Signed-off-by: Rob Landley <rob@landley.net>
+Acked-by: Kees Cook <keescook@chromium.org>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Paul Bolle <pebolle@tiscali.nl>
+Cc: "H.J. Lu" <hjl.tools@gmail.com>
+Link: http://lkml.kernel.org/r/ced18878-693a-9576-a024-113ef39a22c0@landley.net
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/boot/compressed/Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -94,7 +94,7 @@ vmlinux-objs-$(CONFIG_EFI_MIXED) += $(ob
+ quiet_cmd_check_data_rel = DATAREL $@
+ define cmd_check_data_rel
+ for obj in $(filter %.o,$^); do \
+- readelf -S $$obj | grep -qF .rel.local && { \
++ ${CROSS_COMPILE}readelf -S $$obj | grep -qF .rel.local && { \
+ echo "error: $$obj has data relocations!" >&2; \
+ exit 1; \
+ } || true; \
--- /dev/null
+From 2d1f406139ec20320bf38bcd2461aa8e358084b5 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Fri, 19 May 2017 11:39:09 +0200
+Subject: x86/MCE: Export memory_error()
+
+From: Borislav Petkov <bp@suse.de>
+
+commit 2d1f406139ec20320bf38bcd2461aa8e358084b5 upstream.
+
+Export the function which checks whether an MCE is a memory error to
+other users so that we can reuse the logic. Drop the boot_cpu_data use,
+while at it, as mce.cpuvendor already has the CPU vendor in there.
+
+Integrate a piece from a patch from Vishal Verma
+<vishal.l.verma@intel.com> to export it for modules (nfit).
+
+The main reason we're exporting it is that the nfit handler
+nfit_handle_mce() needs to detect a memory error properly before doing
+its recovery actions.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Vishal Verma <vishal.l.verma@intel.com>
+Link: http://lkml.kernel.org/r/20170519093915.15413-2-bp@alien8.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/mce.h | 1 +
+ arch/x86/kernel/cpu/mcheck/mce.c | 11 +++++------
+ 2 files changed, 6 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/include/asm/mce.h
++++ b/arch/x86/include/asm/mce.h
+@@ -257,6 +257,7 @@ static inline void mce_amd_feature_init(
+ #endif
+
+ int mce_available(struct cpuinfo_x86 *c);
++bool mce_is_memory_error(struct mce *m);
+
+ DECLARE_PER_CPU(unsigned, mce_exception_count);
+ DECLARE_PER_CPU(unsigned, mce_poll_count);
+--- a/arch/x86/kernel/cpu/mcheck/mce.c
++++ b/arch/x86/kernel/cpu/mcheck/mce.c
+@@ -598,16 +598,14 @@ static void mce_read_aux(struct mce *m,
+ }
+ }
+
+-static bool memory_error(struct mce *m)
++bool mce_is_memory_error(struct mce *m)
+ {
+- struct cpuinfo_x86 *c = &boot_cpu_data;
+-
+- if (c->x86_vendor == X86_VENDOR_AMD) {
++ if (m->cpuvendor == X86_VENDOR_AMD) {
+ /* ErrCodeExt[20:16] */
+ u8 xec = (m->status >> 16) & 0x1f;
+
+ return (xec == 0x0 || xec == 0x8);
+- } else if (c->x86_vendor == X86_VENDOR_INTEL) {
++ } else if (m->cpuvendor == X86_VENDOR_INTEL) {
+ /*
+ * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
+ *
+@@ -628,6 +626,7 @@ static bool memory_error(struct mce *m)
+
+ return false;
+ }
++EXPORT_SYMBOL_GPL(mce_is_memory_error);
+
+ DEFINE_PER_CPU(unsigned, mce_poll_count);
+
+@@ -691,7 +690,7 @@ bool machine_check_poll(enum mcp_flags f
+
+ severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);
+
+- if (severity == MCE_DEFERRED_SEVERITY && memory_error(&m))
++ if (severity == MCE_DEFERRED_SEVERITY && mce_is_memory_error(&m))
+ if (m.status & MCI_STATUS_ADDRV)
+ m.severity = severity;
+
--- /dev/null
+From cbed27cdf0e3f7ea3b2259e86b9e34df02be3fe4 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Tue, 18 Apr 2017 15:07:11 -0400
+Subject: x86/PAT: Fix Xorg regression on CPUs that don't support PAT
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit cbed27cdf0e3f7ea3b2259e86b9e34df02be3fe4 upstream.
+
+In the file arch/x86/mm/pat.c, there's a '__pat_enabled' variable. The
+variable is set to 1 by default and the function pat_init() sets
+__pat_enabled to 0 if the CPU doesn't support PAT.
+
+However, on AMD K6-3 CPUs, the processor initialization code never calls
+pat_init() and so __pat_enabled stays 1 and the function pat_enabled()
+returns true, even though the K6-3 CPU doesn't support PAT.
+
+The result of this bug is that a kernel warning is produced when attempting to
+start the Xserver and the Xserver doesn't start (fork() returns ENOMEM).
+Another symptom of this bug is that the framebuffer driver doesn't set the
+K6-3 MTRR registers:
+
+ x86/PAT: Xorg:3891 map pfn expected mapping type uncached-minus for [mem 0xe4000000-0xe5ffffff], got write-combining
+ ------------[ cut here ]------------
+ WARNING: CPU: 0 PID: 3891 at arch/x86/mm/pat.c:1020 untrack_pfn+0x5c/0x9f
+ ...
+ x86/PAT: Xorg:3891 map pfn expected mapping type uncached-minus for [mem 0xe4000000-0xe5ffffff], got write-combining
+
+To fix the bug change pat_enabled() so that it returns true only if PAT
+initialization was actually done.
+
+Also, I changed boot_cpu_has(X86_FEATURE_PAT) to
+this_cpu_has(X86_FEATURE_PAT) in pat_ap_init(), so that we check the PAT
+feature on the processor that is being initialized.
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Luis R. Rodriguez <mcgrof@suse.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Toshi Kani <toshi.kani@hp.com>
+Link: http://lkml.kernel.org/r/alpine.LRH.2.02.1704181501450.26399@file01.intranet.prod.int.rdu2.redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/mm/pat.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/mm/pat.c
++++ b/arch/x86/mm/pat.c
+@@ -64,9 +64,11 @@ static int __init nopat(char *str)
+ }
+ early_param("nopat", nopat);
+
++static bool __read_mostly __pat_initialized = false;
++
+ bool pat_enabled(void)
+ {
+- return !!__pat_enabled;
++ return __pat_initialized;
+ }
+ EXPORT_SYMBOL_GPL(pat_enabled);
+
+@@ -224,13 +226,14 @@ static void pat_bsp_init(u64 pat)
+ }
+
+ wrmsrl(MSR_IA32_CR_PAT, pat);
++ __pat_initialized = true;
+
+ __init_cache_modes(pat);
+ }
+
+ static void pat_ap_init(u64 pat)
+ {
+- if (!boot_cpu_has(X86_FEATURE_PAT)) {
++ if (!this_cpu_has(X86_FEATURE_PAT)) {
+ /*
+ * If this happens we are on a secondary CPU, but switched to
+ * PAT on the boot CPU. We have no way to undo PAT.
+@@ -305,7 +308,7 @@ void pat_init(void)
+ u64 pat;
+ struct cpuinfo_x86 *c = &boot_cpu_data;
+
+- if (!pat_enabled()) {
++ if (!__pat_enabled) {
+ init_cache_modes();
+ return;
+ }