--- /dev/null
+From f43e5210c739fe76a4b0ed851559d6902f20ceb1 Mon Sep 17 00:00:00 2001
+From: Johannes Berg <johannes.berg@intel.com>
+Date: Mon, 23 Sep 2019 13:51:16 +0200
+Subject: cfg80211: initialize on-stack chandefs
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+commit f43e5210c739fe76a4b0ed851559d6902f20ceb1 upstream.
+
+In a few places we don't properly initialize on-stack chandefs,
+resulting in EDMG data to be non-zero, which broke things.
+
+Additionally, in a few places we rely on the driver to init the
+data completely, but perhaps we shouldn't as non-EDMG drivers
+may not initialize the EDMG data, also initialize it there.
+
+Cc: stable@vger.kernel.org
+Fixes: 2a38075cd0be ("nl80211: Add support for EDMG channels")
+Reported-by: Dmitry Osipenko <digetx@gmail.com>
+Tested-by: Dmitry Osipenko <digetx@gmail.com>
+Link: https://lore.kernel.org/r/1569239475-I2dcce394ecf873376c386a78f31c2ec8b538fa25@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/wireless/nl80211.c | 4 +++-
+ net/wireless/reg.c | 2 +-
+ net/wireless/wext-compat.c | 2 +-
+ 3 files changed, 5 insertions(+), 3 deletions(-)
+
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -2597,6 +2597,8 @@ int nl80211_parse_chandef(struct cfg8021
+
+ control_freq = nla_get_u32(attrs[NL80211_ATTR_WIPHY_FREQ]);
+
++ memset(chandef, 0, sizeof(*chandef));
++
+ chandef->chan = ieee80211_get_channel(&rdev->wiphy, control_freq);
+ chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
+ chandef->center_freq1 = control_freq;
+@@ -3125,7 +3127,7 @@ static int nl80211_send_iface(struct sk_
+
+ if (rdev->ops->get_channel) {
+ int ret;
+- struct cfg80211_chan_def chandef;
++ struct cfg80211_chan_def chandef = {};
+
+ ret = rdev_get_channel(rdev, wdev, &chandef);
+ if (ret == 0) {
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -2108,7 +2108,7 @@ static void reg_call_notifier(struct wip
+
+ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
+ {
+- struct cfg80211_chan_def chandef;
++ struct cfg80211_chan_def chandef = {};
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+ enum nl80211_iftype iftype;
+
+--- a/net/wireless/wext-compat.c
++++ b/net/wireless/wext-compat.c
+@@ -797,7 +797,7 @@ static int cfg80211_wext_giwfreq(struct
+ {
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+- struct cfg80211_chan_def chandef;
++ struct cfg80211_chan_def chandef = {};
+ int ret;
+
+ switch (wdev->iftype) {
--- /dev/null
+From 242b0931c1918c56cd1dc5563fd250a3c39b996d Mon Sep 17 00:00:00 2001
+From: Johannes Berg <johannes.berg@intel.com>
+Date: Fri, 20 Sep 2019 21:54:18 +0200
+Subject: cfg80211: validate SSID/MBSSID element ordering assumption
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+commit 242b0931c1918c56cd1dc5563fd250a3c39b996d upstream.
+
+The code copying the data assumes that the SSID element is
+before the MBSSID element, but since the data is untrusted
+from the AP, this cannot be guaranteed.
+
+Validate that this is indeed the case and ignore the MBSSID
+otherwise, to avoid having to deal with both cases for the
+copy of data that should be between them.
+
+Cc: stable@vger.kernel.org
+Fixes: 0b8fb8235be8 ("cfg80211: Parsing of Multiple BSSID information in scanning")
+Link: https://lore.kernel.org/r/1569009255-I1673911f5eae02964e21bdc11b2bf58e5e207e59@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/wireless/scan.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -1711,7 +1711,12 @@ cfg80211_update_notlisted_nontrans(struc
+ return;
+ new_ie_len -= trans_ssid[1];
+ mbssid = cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen);
+- if (!mbssid)
++ /*
++ * It's not valid to have the MBSSID element before SSID
++ * ignore if that happens - the code below assumes it is
++ * after (while copying things inbetween).
++ */
++ if (!mbssid || mbssid < trans_ssid)
+ return;
+ new_ie_len -= mbssid[1];
+ rcu_read_lock();
--- /dev/null
+From e0e4a2ce7a059d051c66cd7c94314fef3cd91aea Mon Sep 17 00:00:00 2001
+From: Kevin Wang <kevin1.wang@amd.com>
+Date: Thu, 26 Sep 2019 16:16:41 +0800
+Subject: drm/amd/powerplay: change metrics update period from 1ms to 100ms
+
+From: Kevin Wang <kevin1.wang@amd.com>
+
+commit e0e4a2ce7a059d051c66cd7c94314fef3cd91aea upstream.
+
+v2:
+change period from 10ms to 100ms (typo error)
+
+too high an update frequency for the metrics table will cause smu firmware
+errors, so change the metrics table update period from 1ms to 100ms
+(navi10, 12, 14)
+
+Signed-off-by: Kevin Wang <kevin1.wang@amd.com>
+Reviewed-by: Kenneth Feng <kenneth.feng@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org # 5.3.x
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
++++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+@@ -532,7 +532,7 @@ static int navi10_get_metrics_table(stru
+ struct smu_table_context *smu_table= &smu->smu_table;
+ int ret = 0;
+
+- if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) {
++ if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) {
+ ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
+ (void *)smu_table->metrics_table, false);
+ if (ret) {
--- /dev/null
+From f2cbda2dba11de868759cae9c0d2bab5b8411406 Mon Sep 17 00:00:00 2001
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+Date: Tue, 3 Sep 2019 21:06:41 +0200
+Subject: drm/atomic: Reject FLIP_ASYNC unconditionally
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+
+commit f2cbda2dba11de868759cae9c0d2bab5b8411406 upstream.
+
+It's never been wired up. The only userspace that tried to use it (and
+didn't actually check whether anything works, but hey it builds) is
+the -modesetting atomic implementation. And we just shut that up.
+
+If there's anyone else then we need to silently accept this flag no
+matter what, and find a new one. Because once a flag is tainted, it's
+lost.
+
+Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Reviewed-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Cc: Michel Dänzer <michel@daenzer.net>
+Cc: Alex Deucher <alexdeucher@gmail.com>
+Cc: Adam Jackson <ajax@redhat.com>
+Cc: Sean Paul <sean@poorly.run>
+Cc: David Airlie <airlied@linux.ie>
+Cc: stable@vger.kernel.org
+Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190903190642.32588-2-daniel.vetter@ffwll.ch
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/drm_atomic_uapi.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/drm_atomic_uapi.c
++++ b/drivers/gpu/drm/drm_atomic_uapi.c
+@@ -1301,8 +1301,7 @@ int drm_mode_atomic_ioctl(struct drm_dev
+ if (arg->reserved)
+ return -EINVAL;
+
+- if ((arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) &&
+- !dev->mode_config.async_page_flip)
++ if (arg->flags & DRM_MODE_PAGE_FLIP_ASYNC)
+ return -EINVAL;
+
+ /* can't test and expect an event at the same time. */
--- /dev/null
+From 26b1d3b527e7bf3e24b814d617866ac5199ce68d Mon Sep 17 00:00:00 2001
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+Date: Thu, 5 Sep 2019 20:53:18 +0200
+Subject: drm/atomic: Take the atomic toys away from X
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+
+commit 26b1d3b527e7bf3e24b814d617866ac5199ce68d upstream.
+
+The -modesetting ddx has a totally broken idea of how atomic works:
+- doesn't disable old connectors, assuming they get auto-disable like
+ with the legacy setcrtc
+- assumes ASYNC_FLIP is wired through for the atomic ioctl
+- not a single call to TEST_ONLY
+
+Iow the implementation is a 1:1 translation of legacy ioctls to
+atomic, which is a) broken b) pointless.
+
+We already have bugs in both i915 and amdgpu-DC where this prevents us
+from enabling neat features.
+
+If anyone ever cares about atomic in X we can easily add a new atomic
+level (req->value == 2) for X to get back the shiny toys.
+
+Since these broken versions of -modesetting have been shipping,
+there's really no other way to get out of this bind.
+
+v2:
+- add an informational dmesg output (Rob, Ajax)
+- reorder after the DRIVER_ATOMIC check to avoid useless noise (Ilia)
+- allow req->value > 2 so that X can do another attempt at atomic in
+ the future
+
+v3: Go with paranoid, insist that the X should be first (suggested by
+Rob)
+
+Cc: Ilia Mirkin <imirkin@alum.mit.edu>
+References: https://gitlab.freedesktop.org/xorg/xserver/issues/629
+References: https://gitlab.freedesktop.org/xorg/xserver/merge_requests/180
+References: abbc0697d5fb ("drm/fb: revert the i915 Actually configure untiled displays from master")
+Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> (v1)
+Reviewed-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com> (v1)
+Cc: Michel Dänzer <michel@daenzer.net>
+Cc: Alex Deucher <alexdeucher@gmail.com>
+Cc: Adam Jackson <ajax@redhat.com>
+Acked-by: Adam Jackson <ajax@redhat.com>
+Cc: Sean Paul <sean@poorly.run>
+Cc: David Airlie <airlied@linux.ie>
+Cc: Rob Clark <robdclark@gmail.com>
+Acked-by: Rob Clark <robdclark@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190905185318.31363-1-daniel.vetter@ffwll.ch
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/drm_ioctl.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/drm_ioctl.c
++++ b/drivers/gpu/drm/drm_ioctl.c
+@@ -336,7 +336,12 @@ drm_setclientcap(struct drm_device *dev,
+ case DRM_CLIENT_CAP_ATOMIC:
+ if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
+ return -EOPNOTSUPP;
+- if (req->value > 1)
++ /* The modesetting DDX has a totally broken idea of atomic. */
++ if (current->comm[0] == 'X' && req->value == 1) {
++ pr_info("broken atomic modeset userspace detected, disabling atomic\n");
++ return -EOPNOTSUPP;
++ }
++ if (req->value > 2)
+ return -EINVAL;
+ file_priv->atomic = req->value;
+ file_priv->universal_planes = req->value;
--- /dev/null
+From cffb4c3ea37248c4fc2f4ce747e5c24af88aec76 Mon Sep 17 00:00:00 2001
+From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Date: Wed, 25 Sep 2019 10:21:09 +0200
+Subject: drm/i915/dp: Fix dsc bpp calculations, v5.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+
+commit cffb4c3ea37248c4fc2f4ce747e5c24af88aec76 upstream.
+
+There was an integer wraparound when mode_clock became too high,
+and we didn't correct for the FEC overhead factor when dividing,
+with the calculations breaking at HBR3.
+
+As a result our calculated bpp was way too high, and the link width
+limitation never came into effect.
+
+Print out the resulting bpp calculations as a sanity check, just
+in case we ever have to debug it later on again.
+
+We also used the wrong factor for FEC. While bspec mentions 2.4%,
+all the calculations use 1/0.972261, and the same ratio should be
+applied to data M/N as well, so use it there when FEC is enabled.
+
+This fixes the FIFO underrun we are seeing with FEC enabled.
+
+Changes since v2:
+- Handle fec_enable in intel_link_compute_m_n, so only data M/N is adjusted. (Ville)
+- Fix initial hardware readout for FEC. (Ville)
+Changes since v3:
+- Remove bogus fec_to_mode_clock. (Ville)
+Changes since v4:
+- Use the correct register for icl. (Ville)
+- Split hw readout to a separate patch.
+
+Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Fixes: d9218c8f6cf4 ("drm/i915/dp: Add helpers for Compressed BPP and Slice Count for DSC")
+Cc: <stable@vger.kernel.org> # v5.0+
+Cc: Manasi Navare <manasi.d.navare@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190925082110.17439-1-maarten.lankhorst@linux.intel.com
+Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+(cherry picked from commit ed06efb801bd291e935238d3fba46fa03d098f0e)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/display/intel_display.c | 12 +
+ drivers/gpu/drm/i915/display/intel_display.h | 2
+ drivers/gpu/drm/i915/display/intel_dp.c | 184 +++++++++++++--------------
+ drivers/gpu/drm/i915/display/intel_dp.h | 6
+ drivers/gpu/drm/i915/display/intel_dp_mst.c | 2
+ 5 files changed, 107 insertions(+), 99 deletions(-)
+
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -7132,7 +7132,7 @@ retry:
+ pipe_config->fdi_lanes = lane;
+
+ intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
+- link_bw, &pipe_config->fdi_m_n, false);
++ link_bw, &pipe_config->fdi_m_n, false, false);
+
+ ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
+ if (ret == -EDEADLK)
+@@ -7379,11 +7379,15 @@ void
+ intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
+ int pixel_clock, int link_clock,
+ struct intel_link_m_n *m_n,
+- bool constant_n)
++ bool constant_n, bool fec_enable)
+ {
+- m_n->tu = 64;
++ u32 data_clock = bits_per_pixel * pixel_clock;
++
++ if (fec_enable)
++ data_clock = intel_dp_mode_to_fec_clock(data_clock);
+
+- compute_m_n(bits_per_pixel * pixel_clock,
++ m_n->tu = 64;
++ compute_m_n(data_clock,
+ link_clock * nlanes * 8,
+ &m_n->gmch_m, &m_n->gmch_n,
+ constant_n);
+--- a/drivers/gpu/drm/i915/display/intel_display.h
++++ b/drivers/gpu/drm/i915/display/intel_display.h
+@@ -351,7 +351,7 @@ struct intel_link_m_n {
+ void intel_link_compute_m_n(u16 bpp, int nlanes,
+ int pixel_clock, int link_clock,
+ struct intel_link_m_n *m_n,
+- bool constant_n);
++ bool constant_n, bool fec_enable);
+ bool is_ccs_modifier(u64 modifier);
+ void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
+ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -76,8 +76,8 @@
+ #define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
+ #define DP_DSC_MAX_ENC_THROUGHPUT_1 400000
+
+-/* DP DSC FEC Overhead factor = (100 - 2.4)/100 */
+-#define DP_DSC_FEC_OVERHEAD_FACTOR 976
++/* DP DSC FEC Overhead factor = 1/(0.972261) */
++#define DP_DSC_FEC_OVERHEAD_FACTOR 972261
+
+ /* Compliance test status bits */
+ #define INTEL_DP_RESOLUTION_SHIFT_MASK 0
+@@ -526,6 +526,97 @@ int intel_dp_get_link_train_fallback_val
+ return 0;
+ }
+
++u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
++{
++ return div_u64(mul_u32_u32(mode_clock, 1000000U),
++ DP_DSC_FEC_OVERHEAD_FACTOR);
++}
++
++static u16 intel_dp_dsc_get_output_bpp(u32 link_clock, u32 lane_count,
++ u32 mode_clock, u32 mode_hdisplay)
++{
++ u32 bits_per_pixel, max_bpp_small_joiner_ram;
++ int i;
++
++ /*
++ * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
++ * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
++ * for SST -> TimeSlotsPerMTP is 1,
++ * for MST -> TimeSlotsPerMTP has to be calculated
++ */
++ bits_per_pixel = (link_clock * lane_count * 8) /
++ intel_dp_mode_to_fec_clock(mode_clock);
++ DRM_DEBUG_KMS("Max link bpp: %u\n", bits_per_pixel);
++
++ /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
++ max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER / mode_hdisplay;
++ DRM_DEBUG_KMS("Max small joiner bpp: %u\n", max_bpp_small_joiner_ram);
++
++ /*
++ * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
++ * check, output bpp from small joiner RAM check)
++ */
++ bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
++
++ /* Error out if the max bpp is less than smallest allowed valid bpp */
++ if (bits_per_pixel < valid_dsc_bpp[0]) {
++ DRM_DEBUG_KMS("Unsupported BPP %u, min %u\n",
++ bits_per_pixel, valid_dsc_bpp[0]);
++ return 0;
++ }
++
++ /* Find the nearest match in the array of known BPPs from VESA */
++ for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
++ if (bits_per_pixel < valid_dsc_bpp[i + 1])
++ break;
++ }
++ bits_per_pixel = valid_dsc_bpp[i];
++
++ /*
++ * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
++ * fractional part is 0
++ */
++ return bits_per_pixel << 4;
++}
++
++static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
++ int mode_clock, int mode_hdisplay)
++{
++ u8 min_slice_count, i;
++ int max_slice_width;
++
++ if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
++ min_slice_count = DIV_ROUND_UP(mode_clock,
++ DP_DSC_MAX_ENC_THROUGHPUT_0);
++ else
++ min_slice_count = DIV_ROUND_UP(mode_clock,
++ DP_DSC_MAX_ENC_THROUGHPUT_1);
++
++ max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
++ if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
++ DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
++ max_slice_width);
++ return 0;
++ }
++ /* Also take into account max slice width */
++ min_slice_count = min_t(u8, min_slice_count,
++ DIV_ROUND_UP(mode_hdisplay,
++ max_slice_width));
++
++ /* Find the closest match to the valid slice count values */
++ for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
++ if (valid_dsc_slicecount[i] >
++ drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
++ false))
++ break;
++ if (min_slice_count <= valid_dsc_slicecount[i])
++ return valid_dsc_slicecount[i];
++ }
++
++ DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
++ return 0;
++}
++
+ static enum drm_mode_status
+ intel_dp_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+@@ -2248,7 +2339,7 @@ intel_dp_compute_config(struct intel_enc
+ adjusted_mode->crtc_clock,
+ pipe_config->port_clock,
+ &pipe_config->dp_m_n,
+- constant_n);
++ constant_n, pipe_config->fec_enable);
+
+ if (intel_connector->panel.downclock_mode != NULL &&
+ dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
+@@ -2258,7 +2349,7 @@ intel_dp_compute_config(struct intel_enc
+ intel_connector->panel.downclock_mode->clock,
+ pipe_config->port_clock,
+ &pipe_config->dp_m2_n2,
+- constant_n);
++ constant_n, pipe_config->fec_enable);
+ }
+
+ if (!HAS_DDI(dev_priv))
+@@ -4345,91 +4436,6 @@ intel_dp_get_sink_irq_esi(struct intel_d
+ DP_DPRX_ESI_LEN;
+ }
+
+-u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
+- int mode_clock, int mode_hdisplay)
+-{
+- u16 bits_per_pixel, max_bpp_small_joiner_ram;
+- int i;
+-
+- /*
+- * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
+- * (LinkSymbolClock)* 8 * ((100-FECOverhead)/100)*(TimeSlotsPerMTP)
+- * FECOverhead = 2.4%, for SST -> TimeSlotsPerMTP is 1,
+- * for MST -> TimeSlotsPerMTP has to be calculated
+- */
+- bits_per_pixel = (link_clock * lane_count * 8 *
+- DP_DSC_FEC_OVERHEAD_FACTOR) /
+- mode_clock;
+-
+- /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
+- max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER /
+- mode_hdisplay;
+-
+- /*
+- * Greatest allowed DSC BPP = MIN (output BPP from avaialble Link BW
+- * check, output bpp from small joiner RAM check)
+- */
+- bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
+-
+- /* Error out if the max bpp is less than smallest allowed valid bpp */
+- if (bits_per_pixel < valid_dsc_bpp[0]) {
+- DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel);
+- return 0;
+- }
+-
+- /* Find the nearest match in the array of known BPPs from VESA */
+- for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
+- if (bits_per_pixel < valid_dsc_bpp[i + 1])
+- break;
+- }
+- bits_per_pixel = valid_dsc_bpp[i];
+-
+- /*
+- * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
+- * fractional part is 0
+- */
+- return bits_per_pixel << 4;
+-}
+-
+-u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
+- int mode_clock,
+- int mode_hdisplay)
+-{
+- u8 min_slice_count, i;
+- int max_slice_width;
+-
+- if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
+- min_slice_count = DIV_ROUND_UP(mode_clock,
+- DP_DSC_MAX_ENC_THROUGHPUT_0);
+- else
+- min_slice_count = DIV_ROUND_UP(mode_clock,
+- DP_DSC_MAX_ENC_THROUGHPUT_1);
+-
+- max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
+- if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
+- DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
+- max_slice_width);
+- return 0;
+- }
+- /* Also take into account max slice width */
+- min_slice_count = min_t(u8, min_slice_count,
+- DIV_ROUND_UP(mode_hdisplay,
+- max_slice_width));
+-
+- /* Find the closest match to the valid slice count values */
+- for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
+- if (valid_dsc_slicecount[i] >
+- drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
+- false))
+- break;
+- if (min_slice_count <= valid_dsc_slicecount[i])
+- return valid_dsc_slicecount[i];
+- }
+-
+- DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
+- return 0;
+-}
+-
+ static void
+ intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+--- a/drivers/gpu/drm/i915/display/intel_dp.h
++++ b/drivers/gpu/drm/i915/display/intel_dp.h
+@@ -102,10 +102,6 @@ bool intel_dp_source_supports_hbr2(struc
+ bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
+ bool
+ intel_dp_get_link_status(struct intel_dp *intel_dp, u8 *link_status);
+-u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
+- int mode_clock, int mode_hdisplay);
+-u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
+- int mode_hdisplay);
+
+ bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
+ bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp);
+@@ -120,4 +116,6 @@ static inline unsigned int intel_dp_unus
+ return ~((1 << lane_count) - 1) & 0xf;
+ }
+
++u32 intel_dp_mode_to_fec_clock(u32 mode_clock);
++
+ #endif /* __INTEL_DP_H__ */
+--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
++++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
+@@ -81,7 +81,7 @@ static int intel_dp_mst_compute_link_con
+ adjusted_mode->crtc_clock,
+ crtc_state->port_clock,
+ &crtc_state->dp_m_n,
+- constant_n);
++ constant_n, crtc_state->fec_enable);
+ crtc_state->dp_m_n.tu = slots;
+
+ return 0;
--- /dev/null
+From 576f05865581f82ac988ffec70e4e2ebd31165db Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Tue, 30 Jul 2019 12:21:51 +0100
+Subject: drm/i915: Flush extra hard after writing relocations through the GTT
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit 576f05865581f82ac988ffec70e4e2ebd31165db upstream.
+
+Recently discovered in commit bdae33b8b82b ("drm/i915: Use maximum write
+flush for pwrite_gtt") was that we needed to our full write barrier
+before changing the GGTT PTE to ensure that our indirect writes through
+the GTT landed before the PTE changed (and the writes end up in a
+different page). That also applies to our GGTT relocation path.
+
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: stable@vger.kernel.org
+Reviewed-by: Prathap Kumar Valsan <prathap.kumar.valsan@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190730112151.5633-4-chris@chris-wilson.co.uk
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+@@ -1018,11 +1018,12 @@ static void reloc_cache_reset(struct rel
+ kunmap_atomic(vaddr);
+ i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
+ } else {
+- wmb();
++ struct i915_ggtt *ggtt = cache_to_ggtt(cache);
++
++ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
+ io_mapping_unmap_atomic((void __iomem *)vaddr);
+- if (cache->node.allocated) {
+- struct i915_ggtt *ggtt = cache_to_ggtt(cache);
+
++ if (cache->node.allocated) {
+ ggtt->vm.clear_range(&ggtt->vm,
+ cache->node.start,
+ cache->node.size);
+@@ -1077,6 +1078,7 @@ static void *reloc_iomap(struct drm_i915
+ void *vaddr;
+
+ if (cache->vaddr) {
++ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
+ io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
+ } else {
+ struct i915_vma *vma;
+@@ -1118,7 +1120,6 @@ static void *reloc_iomap(struct drm_i915
+
+ offset = cache->node.start;
+ if (cache->node.allocated) {
+- wmb();
+ ggtt->vm.insert_page(&ggtt->vm,
+ i915_gem_object_get_dma_address(obj, page),
+ offset, I915_CACHE_NONE, 0);
--- /dev/null
+From 0a3242bdb47713e09cb004a0ba4947d3edf82d8a Mon Sep 17 00:00:00 2001
+From: Xiaolin Zhang <xiaolin.zhang@intel.com>
+Date: Tue, 27 Aug 2019 16:39:23 +0800
+Subject: drm/i915/gvt: update vgpu workload head pointer correctly
+
+From: Xiaolin Zhang <xiaolin.zhang@intel.com>
+
+commit 0a3242bdb47713e09cb004a0ba4947d3edf82d8a upstream.
+
+when creating a vGPU workload, the guest context head pointer should
+be updated correctly by comparing with the existing workload in the
+guest workload queue including the current running context.
+
+in some situations, there is a running context A and then 2 new vGPU
+workload contexts B and A are received. in the new workload context A, its
+head pointer should be updated with the running context A's tail.
+
+v2: walk through guest workload list in backward way.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Xiaolin Zhang <xiaolin.zhang@intel.com>
+Reviewed-by: Zhenyu Wang <zhenyuw@linux.intel.com>
+Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/gvt/scheduler.c | 28 +++++++++++++++-------------
+ 1 file changed, 15 insertions(+), 13 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gvt/scheduler.c
++++ b/drivers/gpu/drm/i915/gvt/scheduler.c
+@@ -1424,9 +1424,6 @@ static int prepare_mm(struct intel_vgpu_
+ #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
+ ((a)->lrca == (b)->lrca))
+
+-#define get_last_workload(q) \
+- (list_empty(q) ? NULL : container_of(q->prev, \
+- struct intel_vgpu_workload, list))
+ /**
+ * intel_vgpu_create_workload - create a vGPU workload
+ * @vgpu: a vGPU
+@@ -1446,7 +1443,7 @@ intel_vgpu_create_workload(struct intel_
+ {
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct list_head *q = workload_q_head(vgpu, ring_id);
+- struct intel_vgpu_workload *last_workload = get_last_workload(q);
++ struct intel_vgpu_workload *last_workload = NULL;
+ struct intel_vgpu_workload *workload = NULL;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ u64 ring_context_gpa;
+@@ -1472,15 +1469,20 @@ intel_vgpu_create_workload(struct intel_
+ head &= RB_HEAD_OFF_MASK;
+ tail &= RB_TAIL_OFF_MASK;
+
+- if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
+- gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
+- gvt_dbg_el("ctx head %x real head %lx\n", head,
+- last_workload->rb_tail);
+- /*
+- * cannot use guest context head pointer here,
+- * as it might not be updated at this time
+- */
+- head = last_workload->rb_tail;
++ list_for_each_entry_reverse(last_workload, q, list) {
++
++ if (same_context(&last_workload->ctx_desc, desc)) {
++ gvt_dbg_el("ring id %d cur workload == last\n",
++ ring_id);
++ gvt_dbg_el("ctx head %x real head %lx\n", head,
++ last_workload->rb_tail);
++ /*
++ * cannot use guest context head pointer here,
++ * as it might not be updated at this time
++ */
++ head = last_workload->rb_tail;
++ break;
++ }
+ }
+
+ gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
--- /dev/null
+From 9e77f5001b9833a6bdd3940df245053c2212a32b Mon Sep 17 00:00:00 2001
+From: Xiaolin Zhang <xiaolin.zhang@intel.com>
+Date: Fri, 23 Aug 2019 14:57:31 +0800
+Subject: drm/i915: to make vgpu ppgtt notificaiton as atomic operation
+
+From: Xiaolin Zhang <xiaolin.zhang@intel.com>
+
+commit 9e77f5001b9833a6bdd3940df245053c2212a32b upstream.
+
+vgpu ppgtt notification was split into 2 steps, the first step is to
+update PVINFO's pdp register and then write PVINFO's g2v_notify register
+with an action code to trigger ppgtt notification to the GVT side.
+
+currently these steps were not atomic operations due to the lack of any
+protection, so it is easy to enter a race condition state during the MTBF,
+stress and IGT tests, causing a GPU hang.
+
+the solution is to add a lock to make the vgpu ppgtt notification an
+atomic operation.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Xiaolin Zhang <xiaolin.zhang@intel.com>
+Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Link: https://patchwork.freedesktop.org/patch/msgid/1566543451-13955-1-git-send-email-xiaolin.zhang@intel.com
+(cherry picked from commit 52988009843160c5b366b4082ed6df48041c655c)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/i915_drv.h | 1 +
+ drivers/gpu/drm/i915/i915_gem_gtt.c | 12 +++++++-----
+ drivers/gpu/drm/i915/i915_vgpu.c | 1 +
+ 3 files changed, 9 insertions(+), 5 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -1073,6 +1073,7 @@ struct i915_frontbuffer_tracking {
+ };
+
+ struct i915_virtual_gpu {
++ struct mutex lock; /* serialises sending of g2v_notify command pkts */
+ bool active;
+ u32 caps;
+ };
+--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
++++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
+@@ -1248,14 +1248,15 @@ free_scratch_page:
+ return ret;
+ }
+
+-static int gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
++static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
+ {
+- struct i915_address_space *vm = &ppgtt->vm;
+- struct drm_i915_private *dev_priv = vm->i915;
++ struct drm_i915_private *dev_priv = ppgtt->vm.i915;
+ enum vgt_g2v_type msg;
+ int i;
+
+- if (i915_vm_is_4lvl(vm)) {
++ mutex_lock(&dev_priv->vgpu.lock);
++
++ if (i915_vm_is_4lvl(&ppgtt->vm)) {
+ const u64 daddr = px_dma(ppgtt->pd);
+
+ I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
+@@ -1275,9 +1276,10 @@ static int gen8_ppgtt_notify_vgt(struct
+ VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
+ }
+
++ /* g2v_notify atomically (via hv trap) consumes the message packet. */
+ I915_WRITE(vgtif_reg(g2v_notify), msg);
+
+- return 0;
++ mutex_unlock(&dev_priv->vgpu.lock);
+ }
+
+ static void gen8_free_scratch(struct i915_address_space *vm)
+--- a/drivers/gpu/drm/i915/i915_vgpu.c
++++ b/drivers/gpu/drm/i915/i915_vgpu.c
+@@ -79,6 +79,7 @@ void i915_check_vgpu(struct drm_i915_pri
+ dev_priv->vgpu.caps = __raw_uncore_read32(uncore, vgtif_reg(vgt_caps));
+
+ dev_priv->vgpu.active = true;
++ mutex_init(&dev_priv->vgpu.lock);
+ DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
+ }
+
--- /dev/null
+From bdae33b8b82bb379a5b11040b0b37df25c7871c9 Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Thu, 18 Jul 2019 15:54:05 +0100
+Subject: drm/i915: Use maximum write flush for pwrite_gtt
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit bdae33b8b82bb379a5b11040b0b37df25c7871c9 upstream.
+
+As recently discovered by forcing big-core (!llc) machines to use the GTT
+paths, we need our full GTT write flush before manipulating the GTT PTE
+or else the writes may be directed to the wrong page.
+
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Cc: Matthew Auld <matthew.william.auld@gmail.com>
+Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190718145407.21352-2-chris@chris-wilson.co.uk
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/i915_gem.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -648,7 +648,8 @@ i915_gem_gtt_pwrite_fast(struct drm_i915
+ unsigned int page_length = PAGE_SIZE - page_offset;
+ page_length = remain < page_length ? remain : page_length;
+ if (node.allocated) {
+- wmb(); /* flush the write before we modify the GGTT */
++ /* flush the write before we modify the GGTT */
++ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
+ ggtt->vm.insert_page(&ggtt->vm,
+ i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
+ node.start, I915_CACHE_NONE, 0);
+@@ -677,8 +678,8 @@ i915_gem_gtt_pwrite_fast(struct drm_i915
+ i915_gem_object_unlock_fence(obj, fence);
+ out_unpin:
+ mutex_lock(&i915->drm.struct_mutex);
++ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
+ if (node.allocated) {
+- wmb();
+ ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
+ remove_mappable_node(&node);
+ } else {
--- /dev/null
+From cb6d7c7dc7ff8cace666ddec66334117a6068ce2 Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Mon, 8 Jul 2019 15:03:27 +0100
+Subject: drm/i915/userptr: Acquire the page lock around set_page_dirty()
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit cb6d7c7dc7ff8cace666ddec66334117a6068ce2 upstream.
+
+set_page_dirty says:
+
+ For pages with a mapping this should be done under the page lock
+ for the benefit of asynchronous memory errors who prefer a
+ consistent dirty state. This rule can be broken in some special
+ cases, but should be better not to.
+
+Under those rules, it is only safe for us to use the plain set_page_dirty
+calls for shmemfs/anonymous memory. Userptr may be used with real
+mappings and so needs to use the locked version (set_page_dirty_lock).
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=203317
+Fixes: 5cc9ed4b9a7a ("drm/i915: Introduce mapping of user pages into video memory (userptr) ioctl")
+References: 6dcc693bc57f ("ext4: warn when page is dirtied without buffers")
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190708140327.26825-1-chris@chris-wilson.co.uk
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+@@ -664,7 +664,15 @@ i915_gem_userptr_put_pages(struct drm_i9
+
+ for_each_sgt_page(page, sgt_iter, pages) {
+ if (obj->mm.dirty)
+- set_page_dirty(page);
++ /*
++ * As this may not be anonymous memory (e.g. shmem)
++ * but exist on a real mapping, we have to lock
++ * the page in order to dirty it -- holding
++ * the page reference is not sufficient to
++ * prevent the inode from being truncated.
++ * Play safe and take the lock.
++ */
++ set_page_dirty_lock(page);
+
+ mark_page_accessed(page);
+ put_page(page);
--- /dev/null
+From 28ba1b1da49a20ba8fb767d6ddd7c521ec79a119 Mon Sep 17 00:00:00 2001
+From: Anders Roxell <anders.roxell@linaro.org>
+Date: Tue, 30 Jul 2019 17:30:56 +0200
+Subject: drm: mali-dp: Mark expected switch fall-through
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Anders Roxell <anders.roxell@linaro.org>
+
+commit 28ba1b1da49a20ba8fb767d6ddd7c521ec79a119 upstream.
+
+Now that -Wimplicit-fallthrough is passed to GCC by default, the
+following warnings shows up:
+
+../drivers/gpu/drm/arm/malidp_hw.c: In function ‘malidp_format_get_bpp’:
+../drivers/gpu/drm/arm/malidp_hw.c:387:8: warning: this statement may fall
+ through [-Wimplicit-fallthrough=]
+ bpp = 30;
+ ~~~~^~~~
+../drivers/gpu/drm/arm/malidp_hw.c:388:3: note: here
+ case DRM_FORMAT_YUV420_10BIT:
+ ^~~~
+../drivers/gpu/drm/arm/malidp_hw.c: In function ‘malidp_se_irq’:
+../drivers/gpu/drm/arm/malidp_hw.c:1311:4: warning: this statement may fall
+ through [-Wimplicit-fallthrough=]
+ drm_writeback_signal_completion(&malidp->mw_connector, 0);
+ ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+../drivers/gpu/drm/arm/malidp_hw.c:1313:3: note: here
+ case MW_START:
+ ^~~~
+
+Rework to add a 'break;' in a case that didn't have it so that
+the compiler doesn't warn about fall-through.
+
+Cc: stable@vger.kernel.org # v5.2+
+Fixes: b8207562abdd ("drm/arm/malidp: Specified the rotation memory requirements for AFBC YUV formats")
+Acked-by: Liviu Dudau <liviu.dudau@arm.com>
+Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
+Signed-off-by: Liviu Dudau <Liviu.Dudau@arm.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190730153056.3606-1-anders.roxell@linaro.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/arm/malidp_hw.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/arm/malidp_hw.c
++++ b/drivers/gpu/drm/arm/malidp_hw.c
+@@ -385,6 +385,7 @@ int malidp_format_get_bpp(u32 fmt)
+ switch (fmt) {
+ case DRM_FORMAT_VUY101010:
+ bpp = 30;
++ break;
+ case DRM_FORMAT_YUV420_10BIT:
+ bpp = 15;
+ break;
+@@ -1309,7 +1310,7 @@ static irqreturn_t malidp_se_irq(int irq
+ break;
+ case MW_RESTART:
+ drm_writeback_signal_completion(&malidp->mw_connector, 0);
+- /* fall through to a new start */
++ /* fall through - to a new start */
+ case MW_START:
+ /* writeback started, need to emulate one-shot mode */
+ hw->disable_memwrite(hwdev);
--- /dev/null
+From 5fb9b797d5ccf311ae4aba69e86080d47668b5f7 Mon Sep 17 00:00:00 2001
+From: Sean Paul <seanpaul@chromium.org>
+Date: Wed, 7 Aug 2019 14:51:50 -0400
+Subject: drm/msm/dsi: Fix return value check for clk_get_parent
+
+From: Sean Paul <seanpaul@chromium.org>
+
+commit 5fb9b797d5ccf311ae4aba69e86080d47668b5f7 upstream.
+
+clk_get_parent returns an error pointer upon failure, not NULL. So the
+checks as they exist won't catch a failure. This patch changes the
+checks and the return values to properly handle an error pointer.
+
+Fixes: c4d8cfe516dc ("drm/msm/dsi: add implementation for helper functions")
+Cc: Sibi Sankar <sibis@codeaurora.org>
+Cc: Sean Paul <seanpaul@chromium.org>
+Cc: Rob Clark <robdclark@chromium.org>
+Cc: <stable@vger.kernel.org> # v4.19+
+Signed-off-by: Sean Paul <seanpaul@chromium.org>
+Signed-off-by: Rob Clark <robdclark@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/msm/dsi/dsi_host.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
+@@ -421,15 +421,15 @@ static int dsi_clk_init(struct msm_dsi_h
+ }
+
+ msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
+- if (!msm_host->byte_clk_src) {
+- ret = -ENODEV;
++ if (IS_ERR(msm_host->byte_clk_src)) {
++ ret = PTR_ERR(msm_host->byte_clk_src);
+ pr_err("%s: can't find byte_clk clock. ret=%d\n", __func__, ret);
+ goto exit;
+ }
+
+ msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk);
+- if (!msm_host->pixel_clk_src) {
+- ret = -ENODEV;
++ if (IS_ERR(msm_host->pixel_clk_src)) {
++ ret = PTR_ERR(msm_host->pixel_clk_src);
+ pr_err("%s: can't find pixel_clk clock. ret=%d\n", __func__, ret);
+ goto exit;
+ }
--- /dev/null
+From 698c1aa9f83b618de79e9e5e19a58f70a4a6ae0f Mon Sep 17 00:00:00 2001
+From: Lyude Paul <lyude@redhat.com>
+Date: Fri, 13 Sep 2019 18:03:50 -0400
+Subject: drm/nouveau/kms/nv50-: Don't create MSTMs for eDP connectors
+
+From: Lyude Paul <lyude@redhat.com>
+
+commit 698c1aa9f83b618de79e9e5e19a58f70a4a6ae0f upstream.
+
+On the ThinkPad P71, we have one eDP connector exposed along with 5 DP
+connectors, resulting in a total of 11 TMDS encoders. Since the GPU on
+this system is also capable of MST, we create an additional 4 fake MST
+encoders for each DP port. Unfortunately, we also do this for the eDP
+port as well, resulting in:
+
+ 1 eDP port: +1 TMDS encoder
+ +4 DPMST encoders
+ 5 DP ports: +2 TMDS encoders
+ +4 DPMST encoders
+ *5 ports
+ == 35 encoders
+
+Which breaks things, since DRM has a hard coded limit of 32 encoders.
+So, fix this by not creating MSTMs for any eDP connectors. This brings
+us down to 31 encoders, although we can do better.
+
+This fixes driver probing for nouveau on the ThinkPad P71.
+
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/nouveau/dispnv50/disp.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+@@ -1603,7 +1603,8 @@ nv50_sor_create(struct drm_connector *co
+ nv_encoder->aux = aux;
+ }
+
+- if ((data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len)) &&
++ if (nv_connector->type != DCB_CONNECTOR_eDP &&
++ (data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len)) &&
+ ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04)) {
+ ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
+ nv_connector->base.base.id,
--- /dev/null
+From e2c4ed148cf3ec8669a1d90dc66966028e5fad70 Mon Sep 17 00:00:00 2001
+From: Tomi Valkeinen <tomi.valkeinen@ti.com>
+Date: Wed, 2 Oct 2019 15:25:42 +0300
+Subject: drm/omap: fix max fclk divider for omap36xx
+
+From: Tomi Valkeinen <tomi.valkeinen@ti.com>
+
+commit e2c4ed148cf3ec8669a1d90dc66966028e5fad70 upstream.
+
+The OMAP36xx and AM/DM37x TRMs say that the maximum divider for DSS fclk
+(in CM_CLKSEL_DSS) is 32. Experimentation shows that this is not
+correct, and using a divider of 32 breaks DSS with a flood of underflows
+and sync lost errors. Dividers up to 31 seem to work fine.
+
+There is another patch to the DT files to limit the divider correctly,
+but as the DSS driver also needs to know the maximum divider to be able
+to iteratively find good rates, we also need to do the fix in the DSS
+driver.
+
+Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
+Cc: Adam Ford <aford173@gmail.com>
+Cc: stable@vger.kernel.org
+Link: https://patchwork.freedesktop.org/patch/msgid/20191002122542.8449-1-tomi.valkeinen@ti.com
+Tested-by: Adam Ford <aford173@gmail.com>
+Reviewed-by: Jyri Sarha <jsarha@ti.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/omapdrm/dss/dss.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/omapdrm/dss/dss.c
++++ b/drivers/gpu/drm/omapdrm/dss/dss.c
+@@ -1090,7 +1090,7 @@ static const struct dss_features omap34x
+
+ static const struct dss_features omap3630_dss_feats = {
+ .model = DSS_MODEL_OMAP3,
+- .fck_div_max = 32,
++ .fck_div_max = 31,
+ .fck_freq_max = 173000000,
+ .dss_fck_multiplier = 1,
+ .parent_clk_name = "dpll4_ck",
--- /dev/null
+From f1f028ff89cb0d37db299d48e7b2ce19be040d52 Mon Sep 17 00:00:00 2001
+From: "H. Nikolaus Schaller" <hns@goldelico.com>
+Date: Fri, 20 Sep 2019 18:11:15 +0200
+Subject: DTS: ARM: gta04: introduce legacy spi-cs-high to make display work again
+
+From: H. Nikolaus Schaller <hns@goldelico.com>
+
+commit f1f028ff89cb0d37db299d48e7b2ce19be040d52 upstream.
+
+commit 6953c57ab172 "gpio: of: Handle SPI chipselect legacy bindings"
+
+did introduce logic to centrally handle the legacy spi-cs-high property
+in combination with cs-gpios. This assumes that the polarity
+of the CS has to be inverted if spi-cs-high is missing, even
+and especially if non-legacy GPIO_ACTIVE_HIGH is specified.
+
+The DTS for the GTA04 was originally introduced under the assumption
+that there is no need for spi-cs-high if the gpio is defined with
+proper polarity GPIO_ACTIVE_HIGH.
+
+This was not a problem until gpiolib changed the interpretation of
+GPIO_ACTIVE_HIGH and missing spi-cs-high.
+
+The effect is that the missing spi-cs-high is now interpreted as CS being
+low (despite GPIO_ACTIVE_HIGH) which turns off the SPI interface when the
+panel is to be programmed by the panel driver.
+
+Therefore, we have to add the redundant and legacy spi-cs-high property
+to properly activate CS.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: H. Nikolaus Schaller <hns@goldelico.com>
+Signed-off-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/omap3-gta04.dtsi | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/arm/boot/dts/omap3-gta04.dtsi
++++ b/arch/arm/boot/dts/omap3-gta04.dtsi
+@@ -120,6 +120,7 @@
+ spi-max-frequency = <100000>;
+ spi-cpol;
+ spi-cpha;
++ spi-cs-high;
+
+ backlight= <&backlight>;
+ label = "lcd";
--- /dev/null
+From 7fd25e6fc035f4b04b75bca6d7e8daa069603a76 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Thu, 19 Sep 2019 14:12:34 +0200
+Subject: ieee802154: atusb: fix use-after-free at disconnect
+
+From: Johan Hovold <johan@kernel.org>
+
+commit 7fd25e6fc035f4b04b75bca6d7e8daa069603a76 upstream.
+
+The disconnect callback was accessing the hardware-descriptor private
+data after having freed it.
+
+Fixes: 7490b008d123 ("ieee802154: add support for atusb transceiver")
+Cc: stable <stable@vger.kernel.org> # 4.2
+Cc: Alexander Aring <alex.aring@gmail.com>
+Reported-by: syzbot+f4509a9138a1472e7e80@syzkaller.appspotmail.com
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Stefan Schmidt <stefan@datenfreihafen.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/ieee802154/atusb.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ieee802154/atusb.c
++++ b/drivers/net/ieee802154/atusb.c
+@@ -1137,10 +1137,11 @@ static void atusb_disconnect(struct usb_
+
+ ieee802154_unregister_hw(atusb->hw);
+
++ usb_put_dev(atusb->usb_dev);
++
+ ieee802154_free_hw(atusb->hw);
+
+ usb_set_intfdata(interface, NULL);
+- usb_put_dev(atusb->usb_dev);
+
+ pr_debug("%s done\n", __func__);
+ }
--- /dev/null
+From cf387d9644d8c78721cf9b77af9f67bb5b04da16 Mon Sep 17 00:00:00 2001
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
+Date: Tue, 10 Sep 2019 11:58:25 +0530
+Subject: libnvdimm/altmap: Track namespace boundaries in altmap
+
+From: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+
+commit cf387d9644d8c78721cf9b77af9f67bb5b04da16 upstream.
+
+With PFN_MODE_PMEM namespace, the memmap area is allocated from the device
+area. Some architectures map the memmap area with large page size. On
+architectures like ppc64, 16MB page for memap mapping can map 262144 pfns.
+This maps a namespace size of 16G.
+
+When populating memmap region with 16MB page from the device area,
+make sure the allocated space is not used to map resources outside this
+namespace. Such usage of device area will prevent a namespace destroy.
+
+Add resource end pfn in altmap and use that to check if the memmap area
+allocation can map pfn outside the namespace. On ppc64 in such case we fallback
+to allocation from memory.
+
+This fixes the kernel crash reported below:
+
+[ 132.034989] WARNING: CPU: 13 PID: 13719 at mm/memremap.c:133 devm_memremap_pages_release+0x2d8/0x2e0
+[ 133.464754] BUG: Unable to handle kernel data access at 0xc00c00010b204000
+[ 133.464760] Faulting instruction address: 0xc00000000007580c
+[ 133.464766] Oops: Kernel access of bad area, sig: 11 [#1]
+[ 133.464771] LE PAGE_SIZE=64K MMU=Hash SMP NR_CPUS=2048 NUMA pSeries
+.....
+[ 133.464901] NIP [c00000000007580c] vmemmap_free+0x2ac/0x3d0
+[ 133.464906] LR [c0000000000757f8] vmemmap_free+0x298/0x3d0
+[ 133.464910] Call Trace:
+[ 133.464914] [c000007cbfd0f7b0] [c0000000000757f8] vmemmap_free+0x298/0x3d0 (unreliable)
+[ 133.464921] [c000007cbfd0f8d0] [c000000000370a44] section_deactivate+0x1a4/0x240
+[ 133.464928] [c000007cbfd0f980] [c000000000386270] __remove_pages+0x3a0/0x590
+[ 133.464935] [c000007cbfd0fa50] [c000000000074158] arch_remove_memory+0x88/0x160
+[ 133.464942] [c000007cbfd0fae0] [c0000000003be8c0] devm_memremap_pages_release+0x150/0x2e0
+[ 133.464949] [c000007cbfd0fb70] [c000000000738ea0] devm_action_release+0x30/0x50
+[ 133.464955] [c000007cbfd0fb90] [c00000000073a5a4] release_nodes+0x344/0x400
+[ 133.464961] [c000007cbfd0fc40] [c00000000073378c] device_release_driver_internal+0x15c/0x250
+[ 133.464968] [c000007cbfd0fc80] [c00000000072fd14] unbind_store+0x104/0x110
+[ 133.464973] [c000007cbfd0fcd0] [c00000000072ee24] drv_attr_store+0x44/0x70
+[ 133.464981] [c000007cbfd0fcf0] [c0000000004a32bc] sysfs_kf_write+0x6c/0xa0
+[ 133.464987] [c000007cbfd0fd10] [c0000000004a1dfc] kernfs_fop_write+0x17c/0x250
+[ 133.464993] [c000007cbfd0fd60] [c0000000003c348c] __vfs_write+0x3c/0x70
+[ 133.464999] [c000007cbfd0fd80] [c0000000003c75d0] vfs_write+0xd0/0x250
+
+djbw: Aneesh notes that this crash can likely be triggered in any kernel that
+supports 'papr_scm', so flagging that commit for -stable consideration.
+
+Fixes: b5beae5e224f ("powerpc/pseries: Add driver for PAPR SCM regions")
+Cc: <stable@vger.kernel.org>
+Reported-by: Sachin Sant <sachinp@linux.vnet.ibm.com>
+Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+Reviewed-by: Pankaj Gupta <pagupta@redhat.com>
+Tested-by: Santosh Sivaraj <santosh@fossix.org>
+Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
+Link: https://lore.kernel.org/r/20190910062826.10041-1-aneesh.kumar@linux.ibm.com
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/mm/init_64.c | 17 ++++++++++++++++-
+ drivers/nvdimm/pfn_devs.c | 2 ++
+ include/linux/memremap.h | 1 +
+ 3 files changed, 19 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/mm/init_64.c
++++ b/arch/powerpc/mm/init_64.c
+@@ -172,6 +172,21 @@ static __meminit void vmemmap_list_popul
+ vmemmap_list = vmem_back;
+ }
+
++static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
++ unsigned long page_size)
++{
++ unsigned long nr_pfn = page_size / sizeof(struct page);
++ unsigned long start_pfn = page_to_pfn((struct page *)start);
++
++ if ((start_pfn + nr_pfn) > altmap->end_pfn)
++ return true;
++
++ if (start_pfn < altmap->base_pfn)
++ return true;
++
++ return false;
++}
++
+ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
+ struct vmem_altmap *altmap)
+ {
+@@ -194,7 +209,7 @@ int __meminit vmemmap_populate(unsigned
+ * fail due to alignment issues when using 16MB hugepages, so
+ * fall back to system memory if the altmap allocation fail.
+ */
+- if (altmap) {
++ if (altmap && !altmap_cross_boundary(altmap, start, page_size)) {
+ p = altmap_alloc_block_buf(page_size, altmap);
+ if (!p)
+ pr_debug("altmap block allocation failed, falling back to system memory");
+--- a/drivers/nvdimm/pfn_devs.c
++++ b/drivers/nvdimm/pfn_devs.c
+@@ -618,9 +618,11 @@ static int __nvdimm_setup_pfn(struct nd_
+ struct nd_namespace_common *ndns = nd_pfn->ndns;
+ struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+ resource_size_t base = nsio->res.start + start_pad;
++ resource_size_t end = nsio->res.end - end_trunc;
+ struct vmem_altmap __altmap = {
+ .base_pfn = init_altmap_base(base),
+ .reserve = init_altmap_reserve(base),
++ .end_pfn = PHYS_PFN(end),
+ };
+
+ memcpy(res, &nsio->res, sizeof(*res));
+--- a/include/linux/memremap.h
++++ b/include/linux/memremap.h
+@@ -17,6 +17,7 @@ struct device;
+ */
+ struct vmem_altmap {
+ const unsigned long base_pfn;
++ const unsigned long end_pfn;
+ const unsigned long reserve;
+ unsigned long free;
+ unsigned long align;
--- /dev/null
+From 674f31a352da5e9f621f757b9a89262f486533a0 Mon Sep 17 00:00:00 2001
+From: Dave Jiang <dave.jiang@intel.com>
+Date: Tue, 24 Sep 2019 10:34:49 -0700
+Subject: libnvdimm: prevent nvdimm from requesting key when security is disabled
+
+From: Dave Jiang <dave.jiang@intel.com>
+
+commit 674f31a352da5e9f621f757b9a89262f486533a0 upstream.
+
+Current implementation attempts to request keys from the keyring even when
+security is not enabled. Change behavior so when security is disabled it
+will skip key request.
+
+Error messages seen when no keys are installed and libnvdimm is loaded:
+
+ request-key[4598]: Cannot find command to construct key 661489677
+ request-key[4606]: Cannot find command to construct key 34713726
+
+Cc: stable@vger.kernel.org
+Fixes: 4c6926a23b76 ("acpi/nfit, libnvdimm: Add unlock of nvdimm support for Intel DIMMs")
+Signed-off-by: Dave Jiang <dave.jiang@intel.com>
+Link: https://lore.kernel.org/r/156934642272.30222.5230162488753445916.stgit@djiang5-desk3.ch.intel.com
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/nvdimm/security.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/nvdimm/security.c
++++ b/drivers/nvdimm/security.c
+@@ -177,6 +177,10 @@ static int __nvdimm_security_unlock(stru
+ || nvdimm->sec.state < 0)
+ return -EIO;
+
++ /* No need to go further if security is disabled */
++ if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
++ return 0;
++
+ if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
+ dev_dbg(dev, "Security operation in progress.\n");
+ return -EBUSY;
--- /dev/null
+From d8dec42b5c2d2b273bc30b0e073cfbe832d69902 Mon Sep 17 00:00:00 2001
+From: Johannes Berg <johannes.berg@intel.com>
+Date: Tue, 1 Oct 2019 13:19:23 +0200
+Subject: mac80211: keep BHs disabled while calling drv_tx_wake_queue()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+commit d8dec42b5c2d2b273bc30b0e073cfbe832d69902 upstream.
+
+Drivers typically expect this, as it's the case for almost all cases
+where this is called (i.e. from the TX path). Also, the code in mac80211
+itself (if the driver calls ieee80211_tx_dequeue()) expects this as it
+uses this_cpu_ptr() without additional protection.
+
+This should fix various reports of the problem:
+https://bugzilla.kernel.org/show_bug.cgi?id=204127
+https://lore.kernel.org/linux-wireless/CAN5HydrWb3o_FE6A1XDnP1E+xS66d5kiEuhHfiGKkLNQokx13Q@mail.gmail.com/
+https://lore.kernel.org/lkml/nycvar.YFH.7.76.1909111238470.473@cbobk.fhfr.pm/
+
+Cc: stable@vger.kernel.org
+Reported-and-tested-by: Jiri Kosina <jkosina@suse.cz>
+Reported-by: Aaron Hill <aa1ronham@gmail.com>
+Reported-by: Lukas Redlinger <rel+kernel@agilox.net>
+Reported-by: Oleksii Shevchuk <alxchk@gmail.com>
+Fixes: 21a5d4c3a45c ("mac80211: add stop/start logic for software TXQs")
+Link: https://lore.kernel.org/r/1569928763-I3e8838c5ecad878e59d4a94eb069a90f6641461a@changeid
+Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/mac80211/util.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -247,7 +247,8 @@ static void __ieee80211_wake_txqs(struct
+ struct sta_info *sta;
+ int i;
+
+- spin_lock_bh(&fq->lock);
++ local_bh_disable();
++ spin_lock(&fq->lock);
+
+ if (sdata->vif.type == NL80211_IFTYPE_AP)
+ ps = &sdata->bss->ps;
+@@ -273,9 +274,9 @@ static void __ieee80211_wake_txqs(struct
+ &txqi->flags))
+ continue;
+
+- spin_unlock_bh(&fq->lock);
++ spin_unlock(&fq->lock);
+ drv_wake_tx_queue(local, txqi);
+- spin_lock_bh(&fq->lock);
++ spin_lock(&fq->lock);
+ }
+ }
+
+@@ -288,12 +289,14 @@ static void __ieee80211_wake_txqs(struct
+ (ps && atomic_read(&ps->num_sta_ps)) || ac != vif->txq->ac)
+ goto out;
+
+- spin_unlock_bh(&fq->lock);
++ spin_unlock(&fq->lock);
+
+ drv_wake_tx_queue(local, txqi);
++ local_bh_enable();
+ return;
+ out:
+- spin_unlock_bh(&fq->lock);
++ spin_unlock(&fq->lock);
++ local_bh_enable();
+ }
+
+ static void
--- /dev/null
+From d1c536e3177390da43d99f20143b810c35433d1f Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@armlinux.org.uk>
+Date: Sun, 22 Sep 2019 11:26:53 +0100
+Subject: mmc: sdhci: improve ADMA error reporting
+
+From: Russell King <rmk+kernel@armlinux.org.uk>
+
+commit d1c536e3177390da43d99f20143b810c35433d1f upstream.
+
+ADMA errors are potentially data corrupting events; although we print
+the register state, we do not usefully print the ADMA descriptors.
+Worse than that, we print them by referencing their virtual address
+which is meaningless when the register state gives us the DMA address
+of the failing descriptor.
+
+Print the ADMA descriptors giving their DMA addresses rather than their
+virtual addresses, and print them using SDHCI_DUMP() rather than DBG().
+
+We also do not show the correct value of the interrupt status register;
+the register dump shows the current value, after we have cleared the
+pending interrupts we are going to service. What is more useful is to
+print the interrupts that _were_ pending at the time the ADMA error was
+encountered. Fix that too.
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/sdhci.c | 15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -2857,6 +2857,7 @@ static void sdhci_cmd_irq(struct sdhci_h
+ static void sdhci_adma_show_error(struct sdhci_host *host)
+ {
+ void *desc = host->adma_table;
++ dma_addr_t dma = host->adma_addr;
+
+ sdhci_dumpregs(host);
+
+@@ -2864,18 +2865,21 @@ static void sdhci_adma_show_error(struct
+ struct sdhci_adma2_64_desc *dma_desc = desc;
+
+ if (host->flags & SDHCI_USE_64_BIT_DMA)
+- DBG("%p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
+- desc, le32_to_cpu(dma_desc->addr_hi),
++ SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
++ (unsigned long long)dma,
++ le32_to_cpu(dma_desc->addr_hi),
+ le32_to_cpu(dma_desc->addr_lo),
+ le16_to_cpu(dma_desc->len),
+ le16_to_cpu(dma_desc->cmd));
+ else
+- DBG("%p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
+- desc, le32_to_cpu(dma_desc->addr_lo),
++ SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
++ (unsigned long long)dma,
++ le32_to_cpu(dma_desc->addr_lo),
+ le16_to_cpu(dma_desc->len),
+ le16_to_cpu(dma_desc->cmd));
+
+ desc += host->desc_sz;
++ dma += host->desc_sz;
+
+ if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
+ break;
+@@ -2951,7 +2955,8 @@ static void sdhci_data_irq(struct sdhci_
+ != MMC_BUS_TEST_R)
+ host->data->error = -EILSEQ;
+ else if (intmask & SDHCI_INT_ADMA_ERROR) {
+- pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
++ pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
++ intmask);
+ sdhci_adma_show_error(host);
+ host->data->error = -EIO;
+ if (host->ops->adma_workaround)
--- /dev/null
+From 4ee7dde4c777f14cb0f98dd201491bf6cc15899b Mon Sep 17 00:00:00 2001
+From: Adrian Hunter <adrian.hunter@intel.com>
+Date: Mon, 23 Sep 2019 12:08:09 +0200
+Subject: mmc: sdhci: Let drivers define their DMA mask
+
+From: Adrian Hunter <adrian.hunter@intel.com>
+
+commit 4ee7dde4c777f14cb0f98dd201491bf6cc15899b upstream.
+
+Add host operation ->set_dma_mask() so that drivers can define their own
+DMA masks.
+
+Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
+Tested-by: Nicolin Chen <nicoleotsuka@gmail.com>
+Signed-off-by: Thierry Reding <treding@nvidia.com>
+Cc: stable@vger.kernel.org # v4.15 +
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/sdhci.c | 12 ++++--------
+ drivers/mmc/host/sdhci.h | 1 +
+ 2 files changed, 5 insertions(+), 8 deletions(-)
+
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -3763,18 +3763,14 @@ int sdhci_setup_host(struct sdhci_host *
+ host->flags &= ~SDHCI_USE_ADMA;
+ }
+
+- /*
+- * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
+- * and *must* do 64-bit DMA. A driver has the opportunity to change
+- * that during the first call to ->enable_dma(). Similarly
+- * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
+- * implement.
+- */
+ if (sdhci_can_64bit_dma(host))
+ host->flags |= SDHCI_USE_64_BIT_DMA;
+
+ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
+- ret = sdhci_set_dma_mask(host);
++ if (host->ops->set_dma_mask)
++ ret = host->ops->set_dma_mask(host);
++ else
++ ret = sdhci_set_dma_mask(host);
+
+ if (!ret && host->ops->enable_dma)
+ ret = host->ops->enable_dma(host);
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -622,6 +622,7 @@ struct sdhci_ops {
+
+ u32 (*irq)(struct sdhci_host *host, u32 intmask);
+
++ int (*set_dma_mask)(struct sdhci_host *host);
+ int (*enable_dma)(struct sdhci_host *host);
+ unsigned int (*get_max_clock)(struct sdhci_host *host);
+ unsigned int (*get_min_clock)(struct sdhci_host *host);
--- /dev/null
+From 121bd08b029e03404c451bb237729cdff76eafed Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@armlinux.org.uk>
+Date: Sun, 22 Sep 2019 11:26:58 +0100
+Subject: mmc: sdhci-of-esdhc: set DMA snooping based on DMA coherence
+
+From: Russell King <rmk+kernel@armlinux.org.uk>
+
+commit 121bd08b029e03404c451bb237729cdff76eafed upstream.
+
+We must not unconditionally set the DMA snoop bit; if the DMA API is
+assuming that the device is not DMA coherent, and the device snoops the
+CPU caches, the device can see stale cache lines brought in by
+speculative prefetch.
+
+This leads to the device seeing stale data, potentially resulting in
+corrupted data transfers. Commonly, this results in a descriptor fetch
+error such as:
+
+mmc0: ADMA error
+mmc0: sdhci: ============ SDHCI REGISTER DUMP ===========
+mmc0: sdhci: Sys addr: 0x00000000 | Version: 0x00002202
+mmc0: sdhci: Blk size: 0x00000008 | Blk cnt: 0x00000001
+mmc0: sdhci: Argument: 0x00000000 | Trn mode: 0x00000013
+mmc0: sdhci: Present: 0x01f50008 | Host ctl: 0x00000038
+mmc0: sdhci: Power: 0x00000003 | Blk gap: 0x00000000
+mmc0: sdhci: Wake-up: 0x00000000 | Clock: 0x000040d8
+mmc0: sdhci: Timeout: 0x00000003 | Int stat: 0x00000001
+mmc0: sdhci: Int enab: 0x037f108f | Sig enab: 0x037f108b
+mmc0: sdhci: ACmd stat: 0x00000000 | Slot int: 0x00002202
+mmc0: sdhci: Caps: 0x35fa0000 | Caps_1: 0x0000af00
+mmc0: sdhci: Cmd: 0x0000333a | Max curr: 0x00000000
+mmc0: sdhci: Resp[0]: 0x00000920 | Resp[1]: 0x001d8a33
+mmc0: sdhci: Resp[2]: 0x325b5900 | Resp[3]: 0x3f400e00
+mmc0: sdhci: Host ctl2: 0x00000000
+mmc0: sdhci: ADMA Err: 0x00000009 | ADMA Ptr: 0x000000236d43820c
+mmc0: sdhci: ============================================
+mmc0: error -5 whilst initialising SD card
+
+but can lead to other errors, and potentially direct the SDHCI
+controller to read/write data to other memory locations (e.g. if a valid
+descriptor is visible to the device in a stale cache line.)
+
+Fix this by ensuring that the DMA snoop bit corresponds with the
+behaviour of the DMA API. Since the driver currently only supports DT,
+use of_dma_is_coherent(). Note that device_get_dma_attr() can not be
+used as that risks re-introducing this bug if/when the driver is
+converted to ACPI.
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/sdhci-of-esdhc.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/mmc/host/sdhci-of-esdhc.c
++++ b/drivers/mmc/host/sdhci-of-esdhc.c
+@@ -495,7 +495,12 @@ static int esdhc_of_enable_dma(struct sd
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+
+ value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
+- value |= ESDHC_DMA_SNOOP;
++
++ if (of_dma_is_coherent(dev->of_node))
++ value |= ESDHC_DMA_SNOOP;
++ else
++ value &= ~ESDHC_DMA_SNOOP;
++
+ sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
+ return 0;
+ }
--- /dev/null
+From b960bc448a252428bacca271f3416a8bda3b599b Mon Sep 17 00:00:00 2001
+From: Nicolin Chen <nicoleotsuka@gmail.com>
+Date: Mon, 23 Sep 2019 12:08:10 +0200
+Subject: mmc: tegra: Implement ->set_dma_mask()
+
+From: Nicolin Chen <nicoleotsuka@gmail.com>
+
+commit b960bc448a252428bacca271f3416a8bda3b599b upstream.
+
+The SDHCI controller on Tegra186 supports 40-bit addressing, which is
+usually enough to address all of system memory. However, if the SDHCI
+controller is behind an IOMMU, the address space can go beyond. This
+happens on Tegra186 and later where the ARM SMMU has an input address
+space of 48 bits. If the DMA API is backed by this ARM SMMU, the top-
+down IOVA allocator will cause IOV addresses to be returned that the
+SDHCI controller cannot access.
+
+Unfortunately, prior to the introduction of the ->set_dma_mask() host
+operation, the SDHCI core would set either a 64-bit DMA mask if the
+controller claimed to support 64-bit addressing, or a 32-bit DMA mask
+otherwise.
+
+Since the full 64 bits cannot be addressed on Tegra, this had to be
+worked around in commit 68481a7e1c84 ("mmc: tegra: Mark 64 bit dma
+broken on Tegra186") by setting the SDHCI_QUIRK2_BROKEN_64_BIT_DMA
+quirk, which effectively restricts the DMA mask to 32 bits.
+
+One disadvantage of this is that dma_map_*() APIs will now try to use
+the swiotlb to bounce DMA to addresses beyond of the controller's DMA
+mask. This in turn caused degraded performance and can lead to
+situations where the swiotlb buffer is exhausted, which in turn leads
+to DMA transfers to fail.
+
+With the recent introduction of the ->set_dma_mask() host operation,
+this can now be properly fixed. For each generation of Tegra, the exact
+supported DMA mask can be configured. This kills two birds with one
+stone: it avoids the use of bounce buffers because system memory never
+exceeds the addressable memory range of the SDHCI controllers on these
+devices, and at the same time when an IOMMU is involved, it prevents
+IOV addresses from being allocated beyond the addressable range of the
+controllers.
+
+Since the DMA mask is now properly handled, the 64-bit DMA quirk can be
+removed.
+
+Signed-off-by: Nicolin Chen <nicoleotsuka@gmail.com>
+[treding@nvidia.com: provide more background in commit message]
+Tested-by: Nicolin Chen <nicoleotsuka@gmail.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Signed-off-by: Thierry Reding <treding@nvidia.com>
+Cc: stable@vger.kernel.org # v4.15 +
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/sdhci-tegra.c | 48 +++++++++++++++++++++++------------------
+ 1 file changed, 28 insertions(+), 20 deletions(-)
+
+--- a/drivers/mmc/host/sdhci-tegra.c
++++ b/drivers/mmc/host/sdhci-tegra.c
+@@ -4,6 +4,7 @@
+ */
+
+ #include <linux/delay.h>
++#include <linux/dma-mapping.h>
+ #include <linux/err.h>
+ #include <linux/module.h>
+ #include <linux/init.h>
+@@ -104,6 +105,7 @@
+
+ struct sdhci_tegra_soc_data {
+ const struct sdhci_pltfm_data *pdata;
++ u64 dma_mask;
+ u32 nvquirks;
+ u8 min_tap_delay;
+ u8 max_tap_delay;
+@@ -1233,11 +1235,25 @@ static const struct cqhci_host_ops sdhci
+ .update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
+ };
+
++static int tegra_sdhci_set_dma_mask(struct sdhci_host *host)
++{
++ struct sdhci_pltfm_host *platform = sdhci_priv(host);
++ struct sdhci_tegra *tegra = sdhci_pltfm_priv(platform);
++ const struct sdhci_tegra_soc_data *soc = tegra->soc_data;
++ struct device *dev = mmc_dev(host->mmc);
++
++ if (soc->dma_mask)
++ return dma_set_mask_and_coherent(dev, soc->dma_mask);
++
++ return 0;
++}
++
+ static const struct sdhci_ops tegra_sdhci_ops = {
+ .get_ro = tegra_sdhci_get_ro,
+ .read_w = tegra_sdhci_readw,
+ .write_l = tegra_sdhci_writel,
+ .set_clock = tegra_sdhci_set_clock,
++ .set_dma_mask = tegra_sdhci_set_dma_mask,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = tegra_sdhci_reset,
+ .platform_execute_tuning = tegra_sdhci_execute_tuning,
+@@ -1257,6 +1273,7 @@ static const struct sdhci_pltfm_data sdh
+
+ static const struct sdhci_tegra_soc_data soc_data_tegra20 = {
+ .pdata = &sdhci_tegra20_pdata,
++ .dma_mask = DMA_BIT_MASK(32),
+ .nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
+ NVQUIRK_ENABLE_BLOCK_GAP_DET,
+ };
+@@ -1283,6 +1300,7 @@ static const struct sdhci_pltfm_data sdh
+
+ static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
+ .pdata = &sdhci_tegra30_pdata,
++ .dma_mask = DMA_BIT_MASK(32),
+ .nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
+ NVQUIRK_ENABLE_SDR50 |
+ NVQUIRK_ENABLE_SDR104 |
+@@ -1295,6 +1313,7 @@ static const struct sdhci_ops tegra114_s
+ .write_w = tegra_sdhci_writew,
+ .write_l = tegra_sdhci_writel,
+ .set_clock = tegra_sdhci_set_clock,
++ .set_dma_mask = tegra_sdhci_set_dma_mask,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = tegra_sdhci_reset,
+ .platform_execute_tuning = tegra_sdhci_execute_tuning,
+@@ -1316,6 +1335,7 @@ static const struct sdhci_pltfm_data sdh
+
+ static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
+ .pdata = &sdhci_tegra114_pdata,
++ .dma_mask = DMA_BIT_MASK(32),
+ };
+
+ static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
+@@ -1325,22 +1345,13 @@ static const struct sdhci_pltfm_data sdh
+ SDHCI_QUIRK_NO_HISPD_BIT |
+ SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+- /*
+- * The TRM states that the SD/MMC controller found on
+- * Tegra124 can address 34 bits (the maximum supported by
+- * the Tegra memory controller), but tests show that DMA
+- * to or from above 4 GiB doesn't work. This is possibly
+- * caused by missing programming, though it's not obvious
+- * what sequence is required. Mark 64-bit DMA broken for
+- * now to fix this for existing users (e.g. Nyan boards).
+- */
+- SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
++ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ .ops = &tegra114_sdhci_ops,
+ };
+
+ static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
+ .pdata = &sdhci_tegra124_pdata,
++ .dma_mask = DMA_BIT_MASK(34),
+ };
+
+ static const struct sdhci_ops tegra210_sdhci_ops = {
+@@ -1349,6 +1360,7 @@ static const struct sdhci_ops tegra210_s
+ .write_w = tegra210_sdhci_writew,
+ .write_l = tegra_sdhci_writel,
+ .set_clock = tegra_sdhci_set_clock,
++ .set_dma_mask = tegra_sdhci_set_dma_mask,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = tegra_sdhci_reset,
+ .set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
+@@ -1369,6 +1381,7 @@ static const struct sdhci_pltfm_data sdh
+
+ static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
+ .pdata = &sdhci_tegra210_pdata,
++ .dma_mask = DMA_BIT_MASK(34),
+ .nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
+ NVQUIRK_HAS_PADCALIB |
+ NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
+@@ -1383,6 +1396,7 @@ static const struct sdhci_ops tegra186_s
+ .read_w = tegra_sdhci_readw,
+ .write_l = tegra_sdhci_writel,
+ .set_clock = tegra_sdhci_set_clock,
++ .set_dma_mask = tegra_sdhci_set_dma_mask,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = tegra_sdhci_reset,
+ .set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
+@@ -1398,20 +1412,13 @@ static const struct sdhci_pltfm_data sdh
+ SDHCI_QUIRK_NO_HISPD_BIT |
+ SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+- /* SDHCI controllers on Tegra186 support 40-bit addressing.
+- * IOVA addresses are 48-bit wide on Tegra186.
+- * With 64-bit dma mask used for SDHCI, accesses can
+- * be broken. Disable 64-bit dma, which would fall back
+- * to 32-bit dma mask. Ideally 40-bit dma mask would work,
+- * But it is not supported as of now.
+- */
+- SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
++ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ .ops = &tegra186_sdhci_ops,
+ };
+
+ static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
+ .pdata = &sdhci_tegra186_pdata,
++ .dma_mask = DMA_BIT_MASK(40),
+ .nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
+ NVQUIRK_HAS_PADCALIB |
+ NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
+@@ -1424,6 +1431,7 @@ static const struct sdhci_tegra_soc_data
+
+ static const struct sdhci_tegra_soc_data soc_data_tegra194 = {
+ .pdata = &sdhci_tegra186_pdata,
++ .dma_mask = DMA_BIT_MASK(39),
+ .nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
+ NVQUIRK_HAS_PADCALIB |
+ NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
--- /dev/null
+From f88eb7c0d002a67ef31aeb7850b42ff69abc46dc Mon Sep 17 00:00:00 2001
+From: Johannes Berg <johannes.berg@intel.com>
+Date: Fri, 20 Sep 2019 21:54:17 +0200
+Subject: nl80211: validate beacon head
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+commit f88eb7c0d002a67ef31aeb7850b42ff69abc46dc upstream.
+
+We currently don't validate the beacon head, i.e. the header,
+fixed part and elements that are to go in front of the TIM
+element. This means that the variable elements there can be
+malformed, e.g. have a length exceeding the buffer size, but
+most downstream code from this assumes that this has already
+been checked.
+
+Add the necessary checks to the netlink policy.
+
+Cc: stable@vger.kernel.org
+Fixes: ed1b6cc7f80f ("cfg80211/nl80211: add beacon settings")
+Link: https://lore.kernel.org/r/1569009255-I7ac7fbe9436e9d8733439eab8acbbd35e55c74ef@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/wireless/nl80211.c | 37 +++++++++++++++++++++++++++++++++++--
+ 1 file changed, 35 insertions(+), 2 deletions(-)
+
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -201,6 +201,38 @@ cfg80211_get_dev_from_info(struct net *n
+ return __cfg80211_rdev_from_attrs(netns, info->attrs);
+ }
+
++static int validate_beacon_head(const struct nlattr *attr,
++ struct netlink_ext_ack *extack)
++{
++ const u8 *data = nla_data(attr);
++ unsigned int len = nla_len(attr);
++ const struct element *elem;
++ const struct ieee80211_mgmt *mgmt = (void *)data;
++ unsigned int fixedlen = offsetof(struct ieee80211_mgmt,
++ u.beacon.variable);
++
++ if (len < fixedlen)
++ goto err;
++
++ if (ieee80211_hdrlen(mgmt->frame_control) !=
++ offsetof(struct ieee80211_mgmt, u.beacon))
++ goto err;
++
++ data += fixedlen;
++ len -= fixedlen;
++
++ for_each_element(elem, data, len) {
++ /* nothing */
++ }
++
++ if (for_each_element_completed(elem, data, len))
++ return 0;
++
++err:
++ NL_SET_ERR_MSG_ATTR(extack, attr, "malformed beacon head");
++ return -EINVAL;
++}
++
+ static int validate_ie_attr(const struct nlattr *attr,
+ struct netlink_ext_ack *extack)
+ {
+@@ -322,8 +354,9 @@ const struct nla_policy nl80211_policy[N
+
+ [NL80211_ATTR_BEACON_INTERVAL] = { .type = NLA_U32 },
+ [NL80211_ATTR_DTIM_PERIOD] = { .type = NLA_U32 },
+- [NL80211_ATTR_BEACON_HEAD] = { .type = NLA_BINARY,
+- .len = IEEE80211_MAX_DATA_LEN },
++ [NL80211_ATTR_BEACON_HEAD] =
++ NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_beacon_head,
++ IEEE80211_MAX_DATA_LEN),
+ [NL80211_ATTR_BEACON_TAIL] =
+ NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_ie_attr,
+ IEEE80211_MAX_DATA_LEN),
--- /dev/null
+From 533ca1feed98b0bf024779a14760694c7cb4d431 Mon Sep 17 00:00:00 2001
+From: Dexuan Cui <decui@microsoft.com>
+Date: Fri, 2 Aug 2019 22:50:20 +0000
+Subject: PCI: hv: Avoid use of hv_pci_dev->pci_slot after freeing it
+
+From: Dexuan Cui <decui@microsoft.com>
+
+commit 533ca1feed98b0bf024779a14760694c7cb4d431 upstream.
+
+The slot must be removed before the pci_dev is removed, otherwise a panic
+can happen due to use-after-free.
+
+Fixes: 15becc2b56c6 ("PCI: hv: Add hv_pci_remove_slots() when we unload the driver")
+Signed-off-by: Dexuan Cui <decui@microsoft.com>
+Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pci/controller/pci-hyperv.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/pci/controller/pci-hyperv.c
++++ b/drivers/pci/controller/pci-hyperv.c
+@@ -2701,8 +2701,8 @@ static int hv_pci_remove(struct hv_devic
+ /* Remove the bus from PCI's point of view. */
+ pci_lock_rescan_remove();
+ pci_stop_root_bus(hbus->pci_bus);
+- pci_remove_root_bus(hbus->pci_bus);
+ hv_pci_remove_slots(hbus);
++ pci_remove_root_bus(hbus->pci_bus);
+ pci_unlock_rescan_remove();
+ hbus->state = hv_pcibus_removed;
+ }
--- /dev/null
+From d2182b2d4b71ff0549a07f414d921525fade707b Mon Sep 17 00:00:00 2001
+From: Sumit Saxena <sumit.saxena@broadcom.com>
+Date: Fri, 26 Jul 2019 00:55:52 +0530
+Subject: PCI: Restore Resizable BAR size bits correctly for 1MB BARs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Sumit Saxena <sumit.saxena@broadcom.com>
+
+commit d2182b2d4b71ff0549a07f414d921525fade707b upstream.
+
+In a Resizable BAR Control Register, bits 13:8 control the size of the BAR.
+The encoded values of these bits are as follows (see PCIe r5.0, sec
+7.8.6.3):
+
+ Value BAR size
+ 0 1 MB (2^20 bytes)
+ 1 2 MB (2^21 bytes)
+ 2 4 MB (2^22 bytes)
+ ...
+ 43 8 EB (2^63 bytes)
+
+Previously we incorrectly set the BAR size bits for a 1 MB BAR to 0x1f
+instead of 0, so devices that support that size, e.g., new megaraid_sas and
+mpt3sas adapters, fail to initialize during resume from S3 sleep.
+
+Correctly calculate the BAR size bits for Resizable BAR control registers.
+
+Link: https://lore.kernel.org/r/20190725192552.24295-1-sumit.saxena@broadcom.com
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=203939
+Fixes: d3252ace0bc6 ("PCI: Restore resized BAR state on resume")
+Signed-off-by: Sumit Saxena <sumit.saxena@broadcom.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Cc: stable@vger.kernel.org # v4.19+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pci/pci.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1443,7 +1443,7 @@ static void pci_restore_rebar_state(stru
+ pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
+ bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
+ res = pdev->resource + bar_idx;
+- size = order_base_2((resource_size(res) >> 20) | 1) - 1;
++ size = ilog2(resource_size(res)) - 20;
+ ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
+ ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
+ pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
--- /dev/null
+From e3dffa4f6c3612dea337c9c59191bd418afc941b Mon Sep 17 00:00:00 2001
+From: Jon Derrick <jonathan.derrick@intel.com>
+Date: Mon, 16 Sep 2019 07:54:34 -0600
+Subject: PCI: vmd: Fix config addressing when using bus offsets
+
+From: Jon Derrick <jonathan.derrick@intel.com>
+
+commit e3dffa4f6c3612dea337c9c59191bd418afc941b upstream.
+
+VMD maps child device config spaces to the VMD Config BAR linearly
+regardless of the starting bus offset. Because of this, the config
+address decode must ignore starting bus offsets when mapping the BDF to
+the config space address.
+
+Fixes: 2a5a9c9a20f9 ("PCI: vmd: Add offset to bus numbers if necessary")
+Signed-off-by: Jon Derrick <jonathan.derrick@intel.com>
+Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Cc: stable@vger.kernel.org # v5.2+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pci/controller/vmd.c | 16 +++++++++-------
+ 1 file changed, 9 insertions(+), 7 deletions(-)
+
+--- a/drivers/pci/controller/vmd.c
++++ b/drivers/pci/controller/vmd.c
+@@ -94,6 +94,7 @@ struct vmd_dev {
+ struct resource resources[3];
+ struct irq_domain *irq_domain;
+ struct pci_bus *bus;
++ u8 busn_start;
+
+ struct dma_map_ops dma_ops;
+ struct dma_domain dma_domain;
+@@ -440,7 +441,8 @@ static char __iomem *vmd_cfg_addr(struct
+ unsigned int devfn, int reg, int len)
+ {
+ char __iomem *addr = vmd->cfgbar +
+- (bus->number << 20) + (devfn << 12) + reg;
++ ((bus->number - vmd->busn_start) << 20) +
++ (devfn << 12) + reg;
+
+ if ((addr - vmd->cfgbar) + len >=
+ resource_size(&vmd->dev->resource[VMD_CFGBAR]))
+@@ -563,7 +565,7 @@ static int vmd_enable_domain(struct vmd_
+ unsigned long flags;
+ LIST_HEAD(resources);
+ resource_size_t offset[2] = {0};
+- resource_size_t membar2_offset = 0x2000, busn_start = 0;
++ resource_size_t membar2_offset = 0x2000;
+ struct pci_bus *child;
+
+ /*
+@@ -606,14 +608,14 @@ static int vmd_enable_domain(struct vmd_
+ pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig);
+ if (BUS_RESTRICT_CAP(vmcap) &&
+ (BUS_RESTRICT_CFG(vmconfig) == 0x1))
+- busn_start = 128;
++ vmd->busn_start = 128;
+ }
+
+ res = &vmd->dev->resource[VMD_CFGBAR];
+ vmd->resources[0] = (struct resource) {
+ .name = "VMD CFGBAR",
+- .start = busn_start,
+- .end = busn_start + (resource_size(res) >> 20) - 1,
++ .start = vmd->busn_start,
++ .end = vmd->busn_start + (resource_size(res) >> 20) - 1,
+ .flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
+ };
+
+@@ -681,8 +683,8 @@ static int vmd_enable_domain(struct vmd_
+ pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
+ pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]);
+
+- vmd->bus = pci_create_root_bus(&vmd->dev->dev, busn_start, &vmd_ops,
+- sd, &resources);
++ vmd->bus = pci_create_root_bus(&vmd->dev->dev, vmd->busn_start,
++ &vmd_ops, sd, &resources);
+ if (!vmd->bus) {
+ pci_free_resource_list(&resources);
+ irq_domain_remove(vmd->irq_domain);
--- /dev/null
+From a1a30170138c9c5157bd514ccd4d76b47060f29b Mon Sep 17 00:00:00 2001
+From: Jon Derrick <jonathan.derrick@intel.com>
+Date: Mon, 16 Sep 2019 07:54:35 -0600
+Subject: PCI: vmd: Fix shadow offsets to reflect spec changes
+
+From: Jon Derrick <jonathan.derrick@intel.com>
+
+commit a1a30170138c9c5157bd514ccd4d76b47060f29b upstream.
+
+The shadow offset scratchpad was moved to 0x2000-0x2010. Update the
+location to get the correct shadow offset.
+
+Fixes: 6788958e4f3c ("PCI: vmd: Assign membar addresses from shadow registers")
+Signed-off-by: Jon Derrick <jonathan.derrick@intel.com>
+Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Cc: stable@vger.kernel.org # v5.2+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pci/controller/vmd.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/pci/controller/vmd.c
++++ b/drivers/pci/controller/vmd.c
+@@ -31,6 +31,9 @@
+ #define PCI_REG_VMLOCK 0x70
+ #define MB2_SHADOW_EN(vmlock) (vmlock & 0x2)
+
++#define MB2_SHADOW_OFFSET 0x2000
++#define MB2_SHADOW_SIZE 16
++
+ enum vmd_features {
+ /*
+ * Device may contain registers which hint the physical location of the
+@@ -578,7 +581,7 @@ static int vmd_enable_domain(struct vmd_
+ u32 vmlock;
+ int ret;
+
+- membar2_offset = 0x2018;
++ membar2_offset = MB2_SHADOW_OFFSET + MB2_SHADOW_SIZE;
+ ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock);
+ if (ret || vmlock == ~0)
+ return -ENODEV;
+@@ -590,9 +593,9 @@ static int vmd_enable_domain(struct vmd_
+ if (!membar2)
+ return -ENOMEM;
+ offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
+- readq(membar2 + 0x2008);
++ readq(membar2 + MB2_SHADOW_OFFSET);
+ offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
+- readq(membar2 + 0x2010);
++ readq(membar2 + MB2_SHADOW_OFFSET + 8);
+ pci_iounmap(vmd->dev, membar2);
+ }
+ }
--- /dev/null
+From 443f2d5ba13d65ccfd879460f77941875159d154 Mon Sep 17 00:00:00 2001
+From: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Date: Wed, 4 Sep 2019 15:17:38 +0530
+Subject: perf stat: Fix a segmentation fault when using repeat forever
+
+From: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+
+commit 443f2d5ba13d65ccfd879460f77941875159d154 upstream.
+
+Observe a segmentation fault when 'perf stat' is asked to repeat forever
+with the interval option.
+
+Without fix:
+
+ # perf stat -r 0 -I 5000 -e cycles -a sleep 10
+ # time counts unit events
+ 5.000211692 3,13,89,82,34,157 cycles
+ 10.000380119 1,53,98,52,22,294 cycles
+ 10.040467280 17,16,79,265 cycles
+ Segmentation fault
+
+This problem was only observed when we use forever option aka -r 0 and
+works with limited repeats. Calling print_counter with ts being set to
+NULL, is not a correct option when interval is set. Hence avoid
+print_counter(NULL,..) if interval is set.
+
+With fix:
+
+ # perf stat -r 0 -I 5000 -e cycles -a sleep 10
+ # time counts unit events
+ 5.019866622 3,15,14,43,08,697 cycles
+ 10.039865756 3,15,16,31,95,261 cycles
+ 10.059950628 1,26,05,47,158 cycles
+ 5.009902655 3,14,52,62,33,932 cycles
+ 10.019880228 3,14,52,22,89,154 cycles
+ 10.030543876 66,90,18,333 cycles
+ 5.009848281 3,14,51,98,25,437 cycles
+ 10.029854402 3,15,14,93,04,918 cycles
+ 5.009834177 3,14,51,95,92,316 cycles
+
+Committer notes:
+
+Did the 'git bisect' to find the cset introducing the problem to add the
+Fixes tag below, and at that time the problem reproduced as:
+
+ (gdb) run stat -r0 -I500 sleep 1
+ <SNIP>
+ Program received signal SIGSEGV, Segmentation fault.
+ print_interval (prefix=prefix@entry=0x7fffffffc8d0 "", ts=ts@entry=0x0) at builtin-stat.c:866
+ 866 sprintf(prefix, "%6lu.%09lu%s", ts->tv_sec, ts->tv_nsec, csv_sep);
+ (gdb) bt
+ #0 print_interval (prefix=prefix@entry=0x7fffffffc8d0 "", ts=ts@entry=0x0) at builtin-stat.c:866
+ #1 0x000000000041860a in print_counters (ts=ts@entry=0x0, argc=argc@entry=2, argv=argv@entry=0x7fffffffd640) at builtin-stat.c:938
+ #2 0x0000000000419a7f in cmd_stat (argc=2, argv=0x7fffffffd640, prefix=<optimized out>) at builtin-stat.c:1411
+ #3 0x000000000045c65a in run_builtin (p=p@entry=0x6291b8 <commands+216>, argc=argc@entry=5, argv=argv@entry=0x7fffffffd640) at perf.c:370
+ #4 0x000000000045c893 in handle_internal_command (argc=5, argv=0x7fffffffd640) at perf.c:429
+ #5 0x000000000045c8f1 in run_argv (argcp=argcp@entry=0x7fffffffd4ac, argv=argv@entry=0x7fffffffd4a0) at perf.c:473
+ #6 0x000000000045cac9 in main (argc=<optimized out>, argv=<optimized out>) at perf.c:588
+ (gdb)
+
+Mostly the same as just before this patch:
+
+ Program received signal SIGSEGV, Segmentation fault.
+ 0x00000000005874a7 in print_interval (config=0xa1f2a0 <stat_config>, evlist=0xbc9b90, prefix=0x7fffffffd1c0 "`", ts=0x0) at util/stat-display.c:964
+ 964 sprintf(prefix, "%6lu.%09lu%s", ts->tv_sec, ts->tv_nsec, config->csv_sep);
+ (gdb) bt
+ #0 0x00000000005874a7 in print_interval (config=0xa1f2a0 <stat_config>, evlist=0xbc9b90, prefix=0x7fffffffd1c0 "`", ts=0x0) at util/stat-display.c:964
+ #1 0x0000000000588047 in perf_evlist__print_counters (evlist=0xbc9b90, config=0xa1f2a0 <stat_config>, _target=0xa1f0c0 <target>, ts=0x0, argc=2, argv=0x7fffffffd670)
+ at util/stat-display.c:1172
+ #2 0x000000000045390f in print_counters (ts=0x0, argc=2, argv=0x7fffffffd670) at builtin-stat.c:656
+ #3 0x0000000000456bb5 in cmd_stat (argc=2, argv=0x7fffffffd670) at builtin-stat.c:1960
+ #4 0x00000000004dd2e0 in run_builtin (p=0xa30e00 <commands+288>, argc=5, argv=0x7fffffffd670) at perf.c:310
+ #5 0x00000000004dd54d in handle_internal_command (argc=5, argv=0x7fffffffd670) at perf.c:362
+ #6 0x00000000004dd694 in run_argv (argcp=0x7fffffffd4cc, argv=0x7fffffffd4c0) at perf.c:406
+ #7 0x00000000004dda11 in main (argc=5, argv=0x7fffffffd670) at perf.c:531
+ (gdb)
+
+Fixes: d4f63a4741a8 ("perf stat: Introduce print_counters function")
+Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Acked-by: Jiri Olsa <jolsa@kernel.org>
+Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Tested-by: Ravi Bangoria <ravi.bangoria@linux.ibm.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+Cc: stable@vger.kernel.org # v4.2+
+Link: http://lore.kernel.org/lkml/20190904094738.9558-3-srikar@linux.vnet.ibm.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/perf/builtin-stat.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -1962,7 +1962,7 @@ int cmd_stat(int argc, const char **argv
+ run_idx + 1);
+
+ status = run_perf_stat(argc, argv, run_idx);
+- if (forever && status != -1) {
++ if (forever && status != -1 && !interval) {
+ print_counters(NULL, argc, argv);
+ perf_stat__reset_stats();
+ }
--- /dev/null
+From 0216234c2eed1367a318daeb9f4a97d8217412a0 Mon Sep 17 00:00:00 2001
+From: Jiri Olsa <jolsa@kernel.org>
+Date: Thu, 12 Sep 2019 12:52:35 +0200
+Subject: perf tools: Fix segfault in cpu_cache_level__read()
+
+From: Jiri Olsa <jolsa@kernel.org>
+
+commit 0216234c2eed1367a318daeb9f4a97d8217412a0 upstream.
+
+We release wrong pointer on error path in cpu_cache_level__read
+function, leading to segfault:
+
+ (gdb) r record ls
+ Starting program: /root/perf/tools/perf/perf record ls
+ ...
+ [ perf record: Woken up 1 times to write data ]
+ double free or corruption (out)
+
+ Thread 1 "perf" received signal SIGABRT, Aborted.
+ 0x00007ffff7463798 in raise () from /lib64/power9/libc.so.6
+ (gdb) bt
+ #0 0x00007ffff7463798 in raise () from /lib64/power9/libc.so.6
+ #1 0x00007ffff7443bac in abort () from /lib64/power9/libc.so.6
+ #2 0x00007ffff74af8bc in __libc_message () from /lib64/power9/libc.so.6
+ #3 0x00007ffff74b92b8 in malloc_printerr () from /lib64/power9/libc.so.6
+ #4 0x00007ffff74bb874 in _int_free () from /lib64/power9/libc.so.6
+ #5 0x0000000010271260 in __zfree (ptr=0x7fffffffa0b0) at ../../lib/zalloc..
+ #6 0x0000000010139340 in cpu_cache_level__read (cache=0x7fffffffa090, cac..
+ #7 0x0000000010143c90 in build_caches (cntp=0x7fffffffa118, size=<optimiz..
+ ...
+
+Releasing the proper pointer.
+
+Fixes: 720e98b5faf1 ("perf tools: Add perf data cache feature")
+Signed-off-by: Jiri Olsa <jolsa@kernel.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Michael Petlan <mpetlan@redhat.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: stable@vger.kernel.org # v4.6+
+Link: http://lore.kernel.org/lkml/20190912105235.10689-1-jolsa@kernel.org
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/perf/util/header.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/tools/perf/util/header.c
++++ b/tools/perf/util/header.c
+@@ -1061,7 +1061,7 @@ static int cpu_cache_level__read(struct
+
+ scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
+ if (sysfs__read_str(file, &cache->map, &len)) {
+- zfree(&cache->map);
++ zfree(&cache->size);
+ zfree(&cache->type);
+ return -1;
+ }
--- /dev/null
+From 89340d0935c9296c7b8222b6eab30e67cb57ab82 Mon Sep 17 00:00:00 2001
+From: Wanpeng Li <wanpengli@tencent.com>
+Date: Mon, 9 Sep 2019 09:40:28 +0800
+Subject: Revert "locking/pvqspinlock: Don't wait if vCPU is preempted"
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Wanpeng Li <wanpengli@tencent.com>
+
+commit 89340d0935c9296c7b8222b6eab30e67cb57ab82 upstream.
+
+This patch reverts commit 75437bb304b20 (locking/pvqspinlock: Don't
+wait if vCPU is preempted). A large performance regression was caused
+by this commit on over-subscription scenarios.
+
+The test was run on a Xeon Skylake box, 2 sockets, 40 cores, 80 threads,
+with three VMs of 80 vCPUs each. The score of ebizzy -M is reduced from
+13000-14000 records/s to 1700-1800 records/s:
+
+ Host Guest score
+
+vanilla w/o kvm optimizations upstream 1700-1800 records/s
+vanilla w/o kvm optimizations revert 13000-14000 records/s
+vanilla w/ kvm optimizations upstream 4500-5000 records/s
+vanilla w/ kvm optimizations revert 14000-15500 records/s
+
+Exit from aggressive wait-early mechanism can result in premature yield
+and extra scheduling latency.
+
+Actually, only 6% of wait_early events are caused by vcpu_is_preempted()
+being true. However, when one vCPU voluntarily releases its vCPU, all
+the subsequently waiters in the queue will do the same and the cascading
+effect leads to bad performance.
+
+kvm optimizations:
+[1] commit d73eb57b80b (KVM: Boost vCPUs that are delivering interrupts)
+[2] commit 266e85a5ec9 (KVM: X86: Boost queue head vCPU to mitigate lock waiter preemption)
+
+Tested-by: loobinliu@tencent.com
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Waiman Long <longman@redhat.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: loobinliu@tencent.com
+Cc: stable@vger.kernel.org
+Fixes: 75437bb304b20 (locking/pvqspinlock: Don't wait if vCPU is preempted)
+Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/locking/qspinlock_paravirt.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/locking/qspinlock_paravirt.h
++++ b/kernel/locking/qspinlock_paravirt.h
+@@ -269,7 +269,7 @@ pv_wait_early(struct pv_node *prev, int
+ if ((loop & PV_PREV_CHECK_MASK) != 0)
+ return false;
+
+- return READ_ONCE(prev->state) != vcpu_running || vcpu_is_preempted(prev->cpu);
++ return READ_ONCE(prev->state) != vcpu_running;
+ }
+
+ /*
--- /dev/null
+From 61129dd29f7962f278b618a2a3e8fdb986a66dc8 Mon Sep 17 00:00:00 2001
+From: Seth Forshee <seth.forshee@canonical.com>
+Date: Tue, 17 Sep 2019 09:18:53 +0200
+Subject: sched: Add __ASSEMBLY__ guards around struct clone_args
+
+From: Seth Forshee <seth.forshee@canonical.com>
+
+commit 61129dd29f7962f278b618a2a3e8fdb986a66dc8 upstream.
+
+The addition of struct clone_args to uapi/linux/sched.h is not protected
+by __ASSEMBLY__ guards, causing a failure to build from source for glibc
+on RISC-V. Add the guards to fix this.
+
+Fixes: 7f192e3cd316 ("fork: add clone3")
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Cc: <stable@vger.kernel.org>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Link: https://lore.kernel.org/r/20190917071853.12385-1-seth.forshee@canonical.com
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/uapi/linux/sched.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/include/uapi/linux/sched.h
++++ b/include/uapi/linux/sched.h
+@@ -33,6 +33,7 @@
+ #define CLONE_NEWNET 0x40000000 /* New network namespace */
+ #define CLONE_IO 0x80000000 /* Clone io context */
+
++#ifndef __ASSEMBLY__
+ /*
+ * Arguments for the clone3 syscall
+ */
+@@ -46,6 +47,7 @@ struct clone_args {
+ __aligned_u64 stack_size;
+ __aligned_u64 tls;
+ };
++#endif
+
+ /*
+ * Scheduling policies
--- /dev/null
+From 3969e76909d3aa06715997896184ee684f68d164 Mon Sep 17 00:00:00 2001
+From: Shuah Khan <skhan@linuxfoundation.org>
+Date: Tue, 24 Sep 2019 13:52:37 -0600
+Subject: selftests: pidfd: Fix undefined reference to pthread_create()
+
+From: Shuah Khan <skhan@linuxfoundation.org>
+
+commit 3969e76909d3aa06715997896184ee684f68d164 upstream.
+
+Fix build failure:
+
+undefined reference to `pthread_create'
+collect2: error: ld returned 1 exit status
+
+Fix CFLAGS to include pthread correctly.
+
+Fixes: 740378dc7834 ("pidfd: add polling selftests")
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Reviewed-by: Christian Brauner <christian.brauner@ubuntu.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20190924195237.30519-1-skhan@linuxfoundation.org
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/testing/selftests/pidfd/Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/tools/testing/selftests/pidfd/Makefile
++++ b/tools/testing/selftests/pidfd/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+-CFLAGS += -g -I../../../../usr/include/ -lpthread
++CFLAGS += -g -I../../../../usr/include/ -pthread
+
+ TEST_GEN_PROGS := pidfd_test pidfd_open_test
+
--- /dev/null
+From 981c107cbb420ee028f8ecd155352cfd6351c246 Mon Sep 17 00:00:00 2001
+From: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Date: Tue, 10 Sep 2019 21:11:37 +0100
+Subject: selftests/tpm2: Add the missing TEST_FILES assignment
+
+From: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+
+commit 981c107cbb420ee028f8ecd155352cfd6351c246 upstream.
+
+The Python files required by the selftests are not packaged because of
+the missing assignment to TEST_FILES. Add the assignment.
+
+Cc: stable@vger.kernel.org
+Fixes: 6ea3dfe1e073 ("selftests: add TPM 2.0 tests")
+Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Reviewed-by: Petr Vorel <pvorel@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/testing/selftests/tpm2/Makefile | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/tools/testing/selftests/tpm2/Makefile
++++ b/tools/testing/selftests/tpm2/Makefile
+@@ -2,3 +2,4 @@
+ include ../lib.mk
+
+ TEST_PROGS := test_smoke.sh test_space.sh
++TEST_FILES := tpm2.py tpm2_tests.py
tracing-make-sure-variable-reference-alias-has-correct-var_ref_idx.patch
usercopy-avoid-highmem-pfn-warning.patch
timer-read-jiffies-once-when-forwarding-base-clk.patch
+pci-vmd-fix-config-addressing-when-using-bus-offsets.patch
+pci-hv-avoid-use-of-hv_pci_dev-pci_slot-after-freeing-it.patch
+pci-vmd-fix-shadow-offsets-to-reflect-spec-changes.patch
+pci-restore-resizable-bar-size-bits-correctly-for-1mb-bars.patch
+selftests-tpm2-add-the-missing-test_files-assignment.patch
+selftests-pidfd-fix-undefined-reference-to-pthread_create.patch
+watchdog-imx2_wdt-fix-min-calculation-in-imx2_wdt_set_timeout.patch
+perf-tools-fix-segfault-in-cpu_cache_level__read.patch
+perf-stat-fix-a-segmentation-fault-when-using-repeat-forever.patch
+drm-i915-dp-fix-dsc-bpp-calculations-v5.patch
+drm-atomic-reject-flip_async-unconditionally.patch
+drm-atomic-take-the-atomic-toys-away-from-x.patch
+drm-mali-dp-mark-expected-switch-fall-through.patch
+drm-omap-fix-max-fclk-divider-for-omap36xx.patch
+drm-msm-dsi-fix-return-value-check-for-clk_get_parent.patch
+drm-nouveau-kms-nv50-don-t-create-mstms-for-edp-connectors.patch
+drm-amd-powerplay-change-metrics-update-period-from-1ms-to-100ms.patch
+drm-i915-gvt-update-vgpu-workload-head-pointer-correctly.patch
+drm-i915-userptr-acquire-the-page-lock-around-set_page_dirty.patch
+drm-i915-use-maximum-write-flush-for-pwrite_gtt.patch
+drm-i915-flush-extra-hard-after-writing-relocations-through-the-gtt.patch
+drm-i915-to-make-vgpu-ppgtt-notificaiton-as-atomic-operation.patch
+mac80211-keep-bhs-disabled-while-calling-drv_tx_wake_queue.patch
+mmc-tegra-implement-set_dma_mask.patch
+mmc-sdhci-improve-adma-error-reporting.patch
+mmc-sdhci-of-esdhc-set-dma-snooping-based-on-dma-coherence.patch
+mmc-sdhci-let-drivers-define-their-dma-mask.patch
+revert-locking-pvqspinlock-don-t-wait-if-vcpu-is-preempted.patch
+libnvdimm-altmap-track-namespace-boundaries-in-altmap.patch
+libnvdimm-prevent-nvdimm-from-requesting-key-when-security-is-disabled.patch
+sched-add-__assembly__-guards-around-struct-clone_args.patch
+dts-arm-gta04-introduce-legacy-spi-cs-high-to-make-display-work-again.patch
+xen-balloon-set-pages-pageoffline-in-balloon_add_region.patch
+xen-xenbus-fix-self-deadlock-after-killing-user-process.patch
+ieee802154-atusb-fix-use-after-free-at-disconnect.patch
+nl80211-validate-beacon-head.patch
+cfg80211-validate-ssid-mbssid-element-ordering-assumption.patch
+cfg80211-initialize-on-stack-chandefs.patch
--- /dev/null
+From 144783a80cd2cbc45c6ce17db649140b65f203dd Mon Sep 17 00:00:00 2001
+From: Rasmus Villemoes <linux@rasmusvillemoes.dk>
+Date: Mon, 12 Aug 2019 15:13:56 +0200
+Subject: watchdog: imx2_wdt: fix min() calculation in imx2_wdt_set_timeout
+
+From: Rasmus Villemoes <linux@rasmusvillemoes.dk>
+
+commit 144783a80cd2cbc45c6ce17db649140b65f203dd upstream.
+
+Converting from ms to s requires dividing by 1000, not multiplying. So
+this is currently taking the smaller of new_timeout and 1.28e8,
+i.e. effectively new_timeout.
+
+The driver knows what it set max_hw_heartbeat_ms to, so use that
+value instead of doing a division at run-time.
+
+FWIW, this can easily be tested by booting into a busybox shell and
+doing "watchdog -t 5 -T 130 /dev/watchdog" - without this patch, the
+watchdog fires after 130&127 == 2 seconds.
+
+Fixes: b07e228eee69 "watchdog: imx2_wdt: Fix set_timeout for big timeout values"
+Cc: stable@vger.kernel.org # 5.2 plus anything the above got backported to
+Signed-off-by: Rasmus Villemoes <linux@rasmusvillemoes.dk>
+Reviewed-by: Guenter Roeck <linux@roeck-us.net>
+Link: https://lore.kernel.org/r/20190812131356.23039-1-linux@rasmusvillemoes.dk
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Wim Van Sebroeck <wim@linux-watchdog.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/watchdog/imx2_wdt.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/watchdog/imx2_wdt.c
++++ b/drivers/watchdog/imx2_wdt.c
+@@ -55,7 +55,7 @@
+
+ #define IMX2_WDT_WMCR 0x08 /* Misc Register */
+
+-#define IMX2_WDT_MAX_TIME 128
++#define IMX2_WDT_MAX_TIME 128U
+ #define IMX2_WDT_DEFAULT_TIME 60 /* in seconds */
+
+ #define WDOG_SEC_TO_COUNT(s) ((s * 2 - 1) << 8)
+@@ -180,7 +180,7 @@ static int imx2_wdt_set_timeout(struct w
+ {
+ unsigned int actual;
+
+- actual = min(new_timeout, wdog->max_hw_heartbeat_ms * 1000);
++ actual = min(new_timeout, IMX2_WDT_MAX_TIME);
+ __imx2_wdt_set_timeout(wdog, actual);
+ wdog->timeout = new_timeout;
+ return 0;
--- /dev/null
+From c5ad81eb029570c5ca5859539b0679f07a776d25 Mon Sep 17 00:00:00 2001
+From: David Hildenbrand <david@redhat.com>
+Date: Fri, 27 Sep 2019 17:46:28 +0200
+Subject: xen/balloon: Set pages PageOffline() in balloon_add_region()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: David Hildenbrand <david@redhat.com>
+
+commit c5ad81eb029570c5ca5859539b0679f07a776d25 upstream.
+
+We are missing a __SetPageOffline(), which is why we can get
+!PageOffline() pages onto the balloon list, where
+alloc_xenballooned_pages() will complain:
+
+page:ffffea0003e7ffc0 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0
+flags: 0xffffe00001000(reserved)
+raw: 000ffffe00001000 dead000000000100 dead000000000200 0000000000000000
+raw: 0000000000000000 0000000000000000 00000001ffffffff 0000000000000000
+page dumped because: VM_BUG_ON_PAGE(!PageOffline(page))
+------------[ cut here ]------------
+kernel BUG at include/linux/page-flags.h:744!
+invalid opcode: 0000 [#1] SMP NOPTI
+
+Reported-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
+Tested-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
+Fixes: 77c4adf6a6df ("xen/balloon: mark inflated pages PG_offline")
+Cc: stable@vger.kernel.org # v5.1+
+Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Cc: Juergen Gross <jgross@suse.com>
+Cc: Stefano Stabellini <sstabellini@kernel.org>
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/xen/balloon.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/xen/balloon.c
++++ b/drivers/xen/balloon.c
+@@ -688,6 +688,7 @@ static void __init balloon_add_region(un
+ /* totalram_pages and totalhigh_pages do not
+ include the boot-time balloon extension, so
+ don't subtract from it. */
++ __SetPageOffline(page);
+ __balloon_append(page);
+ }
+
--- /dev/null
+From a8fabb38525c51a094607768bac3ba46b3f4a9d5 Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Tue, 1 Oct 2019 17:03:55 +0200
+Subject: xen/xenbus: fix self-deadlock after killing user process
+
+From: Juergen Gross <jgross@suse.com>
+
+commit a8fabb38525c51a094607768bac3ba46b3f4a9d5 upstream.
+
+In case a user process using xenbus has open transactions and is killed
+e.g. via ctrl-C the following cleanup of the allocated resources might
+result in a deadlock due to trying to end a transaction in the xenbus
+worker thread:
+
+[ 2551.474706] INFO: task xenbus:37 blocked for more than 120 seconds.
+[ 2551.492215] Tainted: P OE 5.0.0-29-generic #5
+[ 2551.510263] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+[ 2551.528585] xenbus D 0 37 2 0x80000080
+[ 2551.528590] Call Trace:
+[ 2551.528603] __schedule+0x2c0/0x870
+[ 2551.528606] ? _cond_resched+0x19/0x40
+[ 2551.528632] schedule+0x2c/0x70
+[ 2551.528637] xs_talkv+0x1ec/0x2b0
+[ 2551.528642] ? wait_woken+0x80/0x80
+[ 2551.528645] xs_single+0x53/0x80
+[ 2551.528648] xenbus_transaction_end+0x3b/0x70
+[ 2551.528651] xenbus_file_free+0x5a/0x160
+[ 2551.528654] xenbus_dev_queue_reply+0xc4/0x220
+[ 2551.528657] xenbus_thread+0x7de/0x880
+[ 2551.528660] ? wait_woken+0x80/0x80
+[ 2551.528665] kthread+0x121/0x140
+[ 2551.528667] ? xb_read+0x1d0/0x1d0
+[ 2551.528670] ? kthread_park+0x90/0x90
+[ 2551.528673] ret_from_fork+0x35/0x40
+
+Fix this by doing the cleanup via a workqueue instead.
+
+Reported-by: James Dingwall <james@dingwall.me.uk>
+Fixes: fd8aa9095a95c ("xen: optimize xenbus driver for multiple concurrent xenstore accesses")
+Cc: <stable@vger.kernel.org> # 4.11
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/xen/xenbus/xenbus_dev_frontend.c | 20 ++++++++++++++++++--
+ 1 file changed, 18 insertions(+), 2 deletions(-)
+
+--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
++++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
+@@ -55,6 +55,7 @@
+ #include <linux/string.h>
+ #include <linux/slab.h>
+ #include <linux/miscdevice.h>
++#include <linux/workqueue.h>
+
+ #include <xen/xenbus.h>
+ #include <xen/xen.h>
+@@ -116,6 +117,8 @@ struct xenbus_file_priv {
+ wait_queue_head_t read_waitq;
+
+ struct kref kref;
++
++ struct work_struct wq;
+ };
+
+ /* Read out any raw xenbus messages queued up. */
+@@ -300,14 +303,14 @@ static void watch_fired(struct xenbus_wa
+ mutex_unlock(&adap->dev_data->reply_mutex);
+ }
+
+-static void xenbus_file_free(struct kref *kref)
++static void xenbus_worker(struct work_struct *wq)
+ {
+ struct xenbus_file_priv *u;
+ struct xenbus_transaction_holder *trans, *tmp;
+ struct watch_adapter *watch, *tmp_watch;
+ struct read_buffer *rb, *tmp_rb;
+
+- u = container_of(kref, struct xenbus_file_priv, kref);
++ u = container_of(wq, struct xenbus_file_priv, wq);
+
+ /*
+ * No need for locking here because there are no other users,
+@@ -333,6 +336,18 @@ static void xenbus_file_free(struct kref
+ kfree(u);
+ }
+
++static void xenbus_file_free(struct kref *kref)
++{
++ struct xenbus_file_priv *u;
++
++ /*
++ * We might be called in xenbus_thread().
++ * Use workqueue to avoid deadlock.
++ */
++ u = container_of(kref, struct xenbus_file_priv, kref);
++ schedule_work(&u->wq);
++}
++
+ static struct xenbus_transaction_holder *xenbus_get_transaction(
+ struct xenbus_file_priv *u, uint32_t tx_id)
+ {
+@@ -650,6 +665,7 @@ static int xenbus_file_open(struct inode
+ INIT_LIST_HEAD(&u->watches);
+ INIT_LIST_HEAD(&u->read_buffers);
+ init_waitqueue_head(&u->read_waitq);
++ INIT_WORK(&u->wq, xenbus_worker);
+
+ mutex_init(&u->reply_mutex);
+ mutex_init(&u->msgbuffer_mutex);