--- /dev/null
+From 45103d67c06003d96f46e7850334e01df3d2d0ce Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Apr 2020 16:04:40 -0700
+Subject: drm/i915/display: Load DP_TP_CTL/STATUS offset before use it
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: José Roberto de Souza <jose.souza@intel.com>
+
+[ Upstream commit 12399028751b887bdc2515f1a1e2c81b4fd74085 ]
+
+Right now dp.regs.dp_tp_ctl/status are only set during the encoder
+pre_enable() hook, which is causing all reads and writes to those
+registers to go to offset 0x0 before pre_enable() is executed.
+
+So if i915 takes the BIOS state and doesn't do a modeset, any following
+link retraining will fail.
+
+In the case that i915 needs to do a modeset, the DDI disable sequence
+will write to a wrong register not disabling DP 'Transport Enable' in
+DP_TP_CTL, making a HDMI modeset in the same port/transcoder to
+not light up the monitor.
+
+So here for GENs older than 12, that have those registers fixed at
+port offset range it is loading at encoder/port init while for GEN12
+it will keep setting it at encoder pre_enable() and during HW state
+readout.
+
+Fixes: 4444df6e205b ("drm/i915/tgl: move DP_TP_* to transcoder")
+Cc: Matt Roper <matthew.d.roper@intel.com>
+Cc: Lucas De Marchi <lucas.demarchi@intel.com>
+Signed-off-by: José Roberto de Souza <jose.souza@intel.com>
+Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200414230442.262092-1-jose.souza@intel.com
+(cherry picked from commit edcb9028d66b44d74ba4f8b9daa379b004dc1f85)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/display/intel_ddi.c | 14 +++++++++++---
+ drivers/gpu/drm/i915/display/intel_dp.c | 5 ++---
+ 2 files changed, 13 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
+index 2fe594952748d..d3c58026d55e6 100644
+--- a/drivers/gpu/drm/i915/display/intel_ddi.c
++++ b/drivers/gpu/drm/i915/display/intel_ddi.c
+@@ -3545,9 +3545,6 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder,
+ intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
+ crtc_state->lane_count, is_mst);
+
+- intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port);
+- intel_dp->regs.dp_tp_status = DP_TP_STATUS(port);
+-
+ intel_edp_panel_on(intel_dp);
+
+ intel_ddi_clk_select(encoder, crtc_state);
+@@ -4269,12 +4266,18 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
++ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ u32 temp, flags = 0;
+
+ /* XXX: DSI transcoder paranoia */
+ if (WARN_ON(transcoder_is_dsi(cpu_transcoder)))
+ return;
+
++ if (INTEL_GEN(dev_priv) >= 12) {
++ intel_dp->regs.dp_tp_ctl = TGL_DP_TP_CTL(cpu_transcoder);
++ intel_dp->regs.dp_tp_status = TGL_DP_TP_STATUS(cpu_transcoder);
++ }
++
+ intel_dsc_get_config(encoder, pipe_config);
+
+ temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+@@ -4492,6 +4495,7 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
+ static struct intel_connector *
+ intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
+ {
++ struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+ struct intel_connector *connector;
+ enum port port = intel_dig_port->base.port;
+
+@@ -4502,6 +4506,10 @@ intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
+ intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
+ intel_dig_port->dp.prepare_link_retrain =
+ intel_ddi_prepare_link_retrain;
++ if (INTEL_GEN(dev_priv) < 12) {
++ intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
++ intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);
++ }
+
+ if (!intel_dp_init_connector(intel_dig_port, connector)) {
+ kfree(connector);
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index c7424e2a04a35..fa3a9e9e0b290 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -2492,9 +2492,6 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
+ intel_crtc_has_type(pipe_config,
+ INTEL_OUTPUT_DP_MST));
+
+- intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port);
+- intel_dp->regs.dp_tp_status = DP_TP_STATUS(port);
+-
+ /*
+ * There are four kinds of DP registers:
+ *
+@@ -7616,6 +7613,8 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
+
+ intel_dig_port->dp.output_reg = output_reg;
+ intel_dig_port->max_lanes = 4;
++ intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
++ intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);
+
+ intel_encoder->type = INTEL_OUTPUT_DP;
+ intel_encoder->power_domain = intel_port_to_power_domain(port);
+--
+2.20.1
+
--- /dev/null
+From 678f148204a458cbc1a6e671a291c7025208bd86 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Apr 2020 14:11:17 -0700
+Subject: drm/i915/tgl: Add Wa_14010477008:tgl
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Matt Roper <matthew.d.roper@intel.com>
+
+[ Upstream commit 81fdd7bfeb8e8f76bcdfef9174ec580707c37d38 ]
+
+Media decompression support should not be advertised on any display
+planes for steppings A0-C0.
+
+Bspec: 53273
+Fixes: 2dfbf9d2873a ("drm/i915/tgl: Gen-12 display can decompress surfaces compressed by the media engine")
+Cc: Matt Atwood <matthew.s.atwood@intel.com>
+Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200414211118.2787489-3-matthew.d.roper@intel.com
+Reviewed-by: José Roberto de Souza <jose.souza@intel.com>
+(cherry picked from commit dbff5a8db9c630f61a892ab41a283445e01270f5)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/display/intel_sprite.c | 17 ++++++++++++-----
+ drivers/gpu/drm/i915/i915_drv.h | 2 ++
+ 2 files changed, 14 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
+index fca77ec1e0ddf..f55404a94eba6 100644
+--- a/drivers/gpu/drm/i915/display/intel_sprite.c
++++ b/drivers/gpu/drm/i915/display/intel_sprite.c
+@@ -2754,19 +2754,25 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
+ }
+ }
+
+-static bool gen12_plane_supports_mc_ccs(enum plane_id plane_id)
++static bool gen12_plane_supports_mc_ccs(struct drm_i915_private *dev_priv,
++ enum plane_id plane_id)
+ {
++ /* Wa_14010477008:tgl[a0..c0] */
++ if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_C0))
++ return false;
++
+ return plane_id < PLANE_SPRITE4;
+ }
+
+ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
+ {
++ struct drm_i915_private *dev_priv = to_i915(_plane->dev);
+ struct intel_plane *plane = to_intel_plane(_plane);
+
+ switch (modifier) {
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+- if (!gen12_plane_supports_mc_ccs(plane->id))
++ if (!gen12_plane_supports_mc_ccs(dev_priv, plane->id))
+ return false;
+ /* fall through */
+ case DRM_FORMAT_MOD_LINEAR:
+@@ -2935,9 +2941,10 @@ static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv,
+ }
+ }
+
+-static const u64 *gen12_get_plane_modifiers(enum plane_id plane_id)
++static const u64 *gen12_get_plane_modifiers(struct drm_i915_private *dev_priv,
++ enum plane_id plane_id)
+ {
+- if (gen12_plane_supports_mc_ccs(plane_id))
++ if (gen12_plane_supports_mc_ccs(dev_priv, plane_id))
+ return gen12_plane_format_modifiers_mc_ccs;
+ else
+ return gen12_plane_format_modifiers_rc_ccs;
+@@ -3008,7 +3015,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
+
+ plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
+ if (INTEL_GEN(dev_priv) >= 12) {
+- modifiers = gen12_get_plane_modifiers(plane_id);
++ modifiers = gen12_get_plane_modifiers(dev_priv, plane_id);
+ plane_funcs = &gen12_plane_funcs;
+ } else {
+ if (plane->has_ccs)
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 810e3ccd56ecb..dff1342651126 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -1601,6 +1601,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
+ (IS_ICELAKE(p) && IS_REVID(p, since, until))
+
+ #define TGL_REVID_A0 0x0
++#define TGL_REVID_B0 0x1
++#define TGL_REVID_C0 0x2
+
+ #define IS_TGL_REVID(p, since, until) \
+ (IS_TIGERLAKE(p) && IS_REVID(p, since, until))
+--
+2.20.1
+
--- /dev/null
+From ad9cc9e508e5713ee4c4f3288b78e22265d4d8e8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Apr 2020 16:34:34 -0700
+Subject: drm/i915/tgl: TBT AUX should use TC power well ops
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Matt Roper <matthew.d.roper@intel.com>
+
+[ Upstream commit 335f62e7606a7921775d7cc73f0ad8ffd899bc22 ]
+
+As on ICL, we want to use the Type-C aux handlers for the TBT aux wells
+to ensure the DP_AUX_CH_CTL_TBT_IO flag is set properly.
+
+Fixes: 656409bbaf87 ("drm/i915/tgl: Add power well support")
+Cc: José Roberto de Souza <jose.souza@intel.com>
+Cc: Imre Deak <imre.deak@intel.com>
+Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200415233435.3064257-1-matthew.d.roper@intel.com
+Reviewed-by: José Roberto de Souza <jose.souza@intel.com>
+(cherry picked from commit 3cbdb97564a39020262e62b655e788b63cf426cb)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/display/intel_display_power.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
+index 46c40db992dd7..5895b8c7662e3 100644
+--- a/drivers/gpu/drm/i915/display/intel_display_power.c
++++ b/drivers/gpu/drm/i915/display/intel_display_power.c
+@@ -4068,7 +4068,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
+ {
+ .name = "AUX D TBT1",
+ .domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS,
+- .ops = &hsw_power_well_ops,
++ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+@@ -4079,7 +4079,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
+ {
+ .name = "AUX E TBT2",
+ .domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS,
+- .ops = &hsw_power_well_ops,
++ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+@@ -4090,7 +4090,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
+ {
+ .name = "AUX F TBT3",
+ .domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS,
+- .ops = &hsw_power_well_ops,
++ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+@@ -4101,7 +4101,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
+ {
+ .name = "AUX G TBT4",
+ .domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS,
+- .ops = &hsw_power_well_ops,
++ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+@@ -4112,7 +4112,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
+ {
+ .name = "AUX H TBT5",
+ .domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS,
+- .ops = &hsw_power_well_ops,
++ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+@@ -4123,7 +4123,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
+ {
+ .name = "AUX I TBT6",
+ .domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS,
+- .ops = &hsw_power_well_ops,
++ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+--
+2.20.1
+
--- /dev/null
+From ad5c82469ba955a6d3daf4eaf6b4d594155dfe53 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Apr 2020 23:30:49 +0200
+Subject: drop_monitor: work around gcc-10 stringop-overflow warning
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+[ Upstream commit dc30b4059f6e2abf3712ab537c8718562b21c45d ]
+
+The current gcc-10 snapshot produces a false-positive warning:
+
+net/core/drop_monitor.c: In function 'trace_drop_common.constprop':
+cc1: error: writing 8 bytes into a region of size 0 [-Werror=stringop-overflow=]
+In file included from net/core/drop_monitor.c:23:
+include/uapi/linux/net_dropmon.h:36:8: note: at offset 0 to object 'entries' with size 4 declared here
+ 36 | __u32 entries;
+ | ^~~~~~~
+
+I reported this in the gcc bugzilla, but in case it does not get
+fixed in the release, work around it by using a temporary variable.
+
+Fixes: 9a8afc8d3962 ("Network Drop Monitor: Adding drop monitor implementation & Netlink protocol")
+Link: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94881
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Acked-by: Neil Horman <nhorman@tuxdriver.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/drop_monitor.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
+index 31700e0c39283..04d8e8779384d 100644
+--- a/net/core/drop_monitor.c
++++ b/net/core/drop_monitor.c
+@@ -212,6 +212,7 @@ static void sched_send_work(struct timer_list *t)
+ static void trace_drop_common(struct sk_buff *skb, void *location)
+ {
+ struct net_dm_alert_msg *msg;
++ struct net_dm_drop_point *point;
+ struct nlmsghdr *nlh;
+ struct nlattr *nla;
+ int i;
+@@ -230,11 +231,13 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
+ nlh = (struct nlmsghdr *)dskb->data;
+ nla = genlmsg_data(nlmsg_data(nlh));
+ msg = nla_data(nla);
++ point = msg->points;
+ for (i = 0; i < msg->entries; i++) {
+- if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
+- msg->points[i].count++;
++ if (!memcmp(&location, &point->pc, sizeof(void *))) {
++ point->count++;
+ goto out;
+ }
++ point++;
+ }
+ if (msg->entries == dm_hit_limit)
+ goto out;
+@@ -243,8 +246,8 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
+ */
+ __nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
+ nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
+- memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
+- msg->points[msg->entries].count = 1;
++ memcpy(point->pc, &location, sizeof(void *));
++ point->count = 1;
+ msg->entries++;
+
+ if (!timer_pending(&data->send_timer)) {
+--
+2.20.1
+
--- /dev/null
+From 9ab5c0e3dc7ab97b9b432d237bd3cb7f1da80c00 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 19 Feb 2020 09:33:29 +0000
+Subject: ftrace/selftests: workaround cgroup RT scheduling issues
+
+From: Alan Maguire <alan.maguire@oracle.com>
+
+[ Upstream commit 57c4cfd4a2eef8f94052bd7c0fce0981f74fb213 ]
+
+wakeup_rt.tc and wakeup.tc tests in tracers/ subdirectory
+fail due to the chrt command returning:
+
+ chrt: failed to set pid 0's policy: Operation not permitted.
+
+To work around this, temporarily disable cgroup RT scheduling
+during ftracetest execution. Restore original value on
+test run completion. With these changes in place, both
+tests consistently pass.
+
+Fixes: c575dea2c1a5 ("selftests/ftrace: Add wakeup_rt tracer testcase")
+Fixes: c1edd060b413 ("selftests/ftrace: Add wakeup tracer testcase")
+Signed-off-by: Alan Maguire <alan.maguire@oracle.com>
+Acked-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/ftrace/ftracetest | 22 ++++++++++++++++++++++
+ 1 file changed, 22 insertions(+)
+
+diff --git a/tools/testing/selftests/ftrace/ftracetest b/tools/testing/selftests/ftrace/ftracetest
+index 063ecb290a5a3..144308a757b70 100755
+--- a/tools/testing/selftests/ftrace/ftracetest
++++ b/tools/testing/selftests/ftrace/ftracetest
+@@ -29,8 +29,25 @@ err_ret=1
+ # kselftest skip code is 4
+ err_skip=4
+
++# cgroup RT scheduling prevents chrt commands from succeeding, which
++# induces failures in test wakeup tests. Disable for the duration of
++# the tests.
++
++readonly sched_rt_runtime=/proc/sys/kernel/sched_rt_runtime_us
++
++sched_rt_runtime_orig=$(cat $sched_rt_runtime)
++
++setup() {
++ echo -1 > $sched_rt_runtime
++}
++
++cleanup() {
++ echo $sched_rt_runtime_orig > $sched_rt_runtime
++}
++
+ errexit() { # message
+ echo "Error: $1" 1>&2
++ cleanup
+ exit $err_ret
+ }
+
+@@ -39,6 +56,8 @@ if [ `id -u` -ne 0 ]; then
+ errexit "this must be run by root user"
+ fi
+
++setup
++
+ # Utilities
+ absdir() { # file_path
+ (cd `dirname $1`; pwd)
+@@ -235,6 +254,7 @@ TOTAL_RESULT=0
+
+ INSTANCE=
+ CASENO=0
++
+ testcase() { # testfile
+ CASENO=$((CASENO+1))
+ desc=`grep "^#[ \t]*description:" $1 | cut -f2 -d:`
+@@ -406,5 +426,7 @@ prlog "# of unsupported: " `echo $UNSUPPORTED_CASES | wc -w`
+ prlog "# of xfailed: " `echo $XFAILED_CASES | wc -w`
+ prlog "# of undefined(test bug): " `echo $UNDEFINED_CASES | wc -w`
+
++cleanup
++
+ # if no error, return 0
+ exit $TOTAL_RESULT
+--
+2.20.1
+
--- /dev/null
+From f4199480b4287194477a8ec0602109c185e342f3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 11 Apr 2020 20:33:52 -0500
+Subject: gpio: pca953x: Fix pca953x_gpio_set_config
+
+From: Adam Ford <aford173@gmail.com>
+
+[ Upstream commit dc87f6dd058a648cd2a35e4aa04592dccdc9f0c2 ]
+
+pca953x_gpio_set_config is setup to support pull-up/down
+bias. Currently the driver uses a variable called 'config' to
+determine which options to use. Unfortunately, this is incorrect.
+
+This patch uses function pinconf_to_config_param(config), which
+converts this 'config' parameter back to pinconfig to determine
+which option to use.
+
+Fixes: 15add06841a3 ("gpio: pca953x: add ->set_config implementation")
+Signed-off-by: Adam Ford <aford173@gmail.com>
+Signed-off-by: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpio/gpio-pca953x.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
+index 5638b4e5355f1..4269ea9a817e6 100644
+--- a/drivers/gpio/gpio-pca953x.c
++++ b/drivers/gpio/gpio-pca953x.c
+@@ -531,7 +531,7 @@ static int pca953x_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
+ {
+ struct pca953x_chip *chip = gpiochip_get_data(gc);
+
+- switch (config) {
++ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ return pca953x_gpio_set_pull_up_down(chip, offset, config);
+--
+2.20.1
+
--- /dev/null
+From d0c067167e080cfc76c63a5cab78ddd6dc4279c7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Apr 2020 10:54:56 -0700
+Subject: hv_netvsc: Fix netvsc_start_xmit's return type
+
+From: Nathan Chancellor <natechancellor@gmail.com>
+
+[ Upstream commit 7fdc66debebc6a7170a37c8c9b0d9585a9788fb4 ]
+
+netvsc_start_xmit is used as a callback function for the ndo_start_xmit
+function pointer. ndo_start_xmit's return type is netdev_tx_t but
+netvsc_start_xmit's return type is int.
+
+This causes a failure with Control Flow Integrity (CFI), which requires
+function pointer prototypes and callback function definitions to match
+exactly. When CFI is in enforcing, the kernel panics. When booting a
+CFI kernel with WSL 2, the VM is immediately terminated because of this.
+
+The splat when CONFIG_CFI_PERMISSIVE is used:
+
+[ 5.916765] CFI failure (target: netvsc_start_xmit+0x0/0x10):
+[ 5.916771] WARNING: CPU: 8 PID: 0 at kernel/cfi.c:29 __cfi_check_fail+0x2e/0x40
+[ 5.916772] Modules linked in:
+[ 5.916774] CPU: 8 PID: 0 Comm: swapper/8 Not tainted 5.7.0-rc3-next-20200424-microsoft-cbl-00001-ged4eb37d2c69-dirty #1
+[ 5.916776] RIP: 0010:__cfi_check_fail+0x2e/0x40
+[ 5.916777] Code: 48 c7 c7 70 98 63 a9 48 c7 c6 11 db 47 a9 e8 69 55 59 00 85 c0 75 02 5b c3 48 c7 c7 73 c6 43 a9 48 89 de 31 c0 e8 12 2d f0 ff <0f> 0b 5b c3 00 00 cc cc 00 00 cc cc 00 00 cc cc 00 00 85 f6 74 25
+[ 5.916778] RSP: 0018:ffffa803c0260b78 EFLAGS: 00010246
+[ 5.916779] RAX: 712a1af25779e900 RBX: ffffffffa8cf7950 RCX: ffffffffa962cf08
+[ 5.916779] RDX: ffffffffa9c36b60 RSI: 0000000000000082 RDI: ffffffffa9c36b5c
+[ 5.916780] RBP: ffff8ffc4779c2c0 R08: 0000000000000001 R09: ffffffffa9c3c300
+[ 5.916781] R10: 0000000000000151 R11: ffffffffa9c36b60 R12: ffff8ffe39084000
+[ 5.916782] R13: ffffffffa8cf7950 R14: ffffffffa8d12cb0 R15: ffff8ffe39320140
+[ 5.916784] FS: 0000000000000000(0000) GS:ffff8ffe3bc00000(0000) knlGS:0000000000000000
+[ 5.916785] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 5.916786] CR2: 00007ffef5749408 CR3: 00000002f4f5e000 CR4: 0000000000340ea0
+[ 5.916787] Call Trace:
+[ 5.916788] <IRQ>
+[ 5.916790] __cfi_check+0x3ab58/0x450e0
+[ 5.916793] ? dev_hard_start_xmit+0x11f/0x160
+[ 5.916795] ? sch_direct_xmit+0xf2/0x230
+[ 5.916796] ? __dev_queue_xmit.llvm.11471227737707190958+0x69d/0x8e0
+[ 5.916797] ? neigh_resolve_output+0xdf/0x220
+[ 5.916799] ? neigh_connected_output.cfi_jt+0x8/0x8
+[ 5.916801] ? ip6_finish_output2+0x398/0x4c0
+[ 5.916803] ? nf_nat_ipv6_out+0x10/0xa0
+[ 5.916804] ? nf_hook_slow+0x84/0x100
+[ 5.916807] ? ip6_input_finish+0x8/0x8
+[ 5.916807] ? ip6_output+0x6f/0x110
+[ 5.916808] ? __ip6_local_out.cfi_jt+0x8/0x8
+[ 5.916810] ? mld_sendpack+0x28e/0x330
+[ 5.916811] ? ip_rt_bug+0x8/0x8
+[ 5.916813] ? mld_ifc_timer_expire+0x2db/0x400
+[ 5.916814] ? neigh_proxy_process+0x8/0x8
+[ 5.916816] ? call_timer_fn+0x3d/0xd0
+[ 5.916817] ? __run_timers+0x2a9/0x300
+[ 5.916819] ? rcu_core_si+0x8/0x8
+[ 5.916820] ? run_timer_softirq+0x14/0x30
+[ 5.916821] ? __do_softirq+0x154/0x262
+[ 5.916822] ? native_x2apic_icr_write+0x8/0x8
+[ 5.916824] ? irq_exit+0xba/0xc0
+[ 5.916825] ? hv_stimer0_vector_handler+0x99/0xe0
+[ 5.916826] ? hv_stimer0_callback_vector+0xf/0x20
+[ 5.916826] </IRQ>
+[ 5.916828] ? hv_stimer_global_cleanup.cfi_jt+0x8/0x8
+[ 5.916829] ? raw_setsockopt+0x8/0x8
+[ 5.916830] ? default_idle+0xe/0x10
+[ 5.916832] ? do_idle.llvm.10446269078108580492+0xb7/0x130
+[ 5.916833] ? raw_setsockopt+0x8/0x8
+[ 5.916833] ? cpu_startup_entry+0x15/0x20
+[ 5.916835] ? cpu_hotplug_enable.cfi_jt+0x8/0x8
+[ 5.916836] ? start_secondary+0x188/0x190
+[ 5.916837] ? secondary_startup_64+0xa5/0xb0
+[ 5.916838] ---[ end trace f2683fa869597ba5 ]---
+
+Avoid this by using the right return type for netvsc_start_xmit.
+
+Fixes: fceaf24a943d8 ("Staging: hv: add the Hyper-V virtual network driver")
+Link: https://github.com/ClangBuiltLinux/linux/issues/1009
+Signed-off-by: Nathan Chancellor <natechancellor@gmail.com>
+Reviewed-by: Haiyang Zhang <haiyangz@microsoft.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/hyperv/netvsc_drv.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 2c0a24c606fc7..28a5d46ad5266 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -710,7 +710,8 @@ static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx)
+ goto drop;
+ }
+
+-static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *ndev)
++static netdev_tx_t netvsc_start_xmit(struct sk_buff *skb,
++ struct net_device *ndev)
+ {
+ return netvsc_xmit(skb, ndev, false);
+ }
+--
+2.20.1
+
--- /dev/null
+From 4c6f14a5592e54daeaed313c3192942dedbf0c33 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 May 2020 14:54:09 +0200
+Subject: iommu/amd: Fix race in increase_address_space()/fetch_pte()
+
+From: Joerg Roedel <jroedel@suse.de>
+
+[ Upstream commit eb791aa70b90c559eeb371d807c8813d569393f0 ]
+
+The 'pt_root' and 'mode' struct members of 'struct protection_domain'
+need to be get/set atomically, otherwise the page-table of the domain
+can get corrupted.
+
+Merge the fields into one atomic64_t struct member which can be
+get/set atomically.
+
+Fixes: 92d420ec028d ("iommu/amd: Relax locking in dma_ops path")
+Reported-by: Qian Cai <cai@lca.pw>
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Tested-by: Qian Cai <cai@lca.pw>
+Link: https://lore.kernel.org/r/20200504125413.16798-2-joro@8bytes.org
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/amd_iommu.c | 140 ++++++++++++++++++++++++--------
+ drivers/iommu/amd_iommu_types.h | 9 +-
+ 2 files changed, 112 insertions(+), 37 deletions(-)
+
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 20cce366e9518..28229a38af4d2 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -151,6 +151,26 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
+ return container_of(dom, struct protection_domain, domain);
+ }
+
++static void amd_iommu_domain_get_pgtable(struct protection_domain *domain,
++ struct domain_pgtable *pgtable)
++{
++ u64 pt_root = atomic64_read(&domain->pt_root);
++
++ pgtable->root = (u64 *)(pt_root & PAGE_MASK);
++ pgtable->mode = pt_root & 7; /* lowest 3 bits encode pgtable mode */
++}
++
++static u64 amd_iommu_domain_encode_pgtable(u64 *root, int mode)
++{
++ u64 pt_root;
++
++ /* lowest 3 bits encode pgtable mode */
++ pt_root = mode & 7;
++ pt_root |= (u64)root;
++
++ return pt_root;
++}
++
+ static struct iommu_dev_data *alloc_dev_data(u16 devid)
+ {
+ struct iommu_dev_data *dev_data;
+@@ -1397,13 +1417,18 @@ static struct page *free_sub_pt(unsigned long root, int mode,
+
+ static void free_pagetable(struct protection_domain *domain)
+ {
+- unsigned long root = (unsigned long)domain->pt_root;
++ struct domain_pgtable pgtable;
+ struct page *freelist = NULL;
++ unsigned long root;
++
++ amd_iommu_domain_get_pgtable(domain, &pgtable);
++ atomic64_set(&domain->pt_root, 0);
+
+- BUG_ON(domain->mode < PAGE_MODE_NONE ||
+- domain->mode > PAGE_MODE_6_LEVEL);
++ BUG_ON(pgtable.mode < PAGE_MODE_NONE ||
++ pgtable.mode > PAGE_MODE_6_LEVEL);
+
+- freelist = free_sub_pt(root, domain->mode, freelist);
++ root = (unsigned long)pgtable.root;
++ freelist = free_sub_pt(root, pgtable.mode, freelist);
+
+ free_page_list(freelist);
+ }
+@@ -1417,24 +1442,28 @@ static bool increase_address_space(struct protection_domain *domain,
+ unsigned long address,
+ gfp_t gfp)
+ {
++ struct domain_pgtable pgtable;
+ unsigned long flags;
+ bool ret = false;
+- u64 *pte;
++ u64 *pte, root;
+
+ spin_lock_irqsave(&domain->lock, flags);
+
+- if (address <= PM_LEVEL_SIZE(domain->mode) ||
+- WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
++ amd_iommu_domain_get_pgtable(domain, &pgtable);
++
++ if (address <= PM_LEVEL_SIZE(pgtable.mode) ||
++ WARN_ON_ONCE(pgtable.mode == PAGE_MODE_6_LEVEL))
+ goto out;
+
+ pte = (void *)get_zeroed_page(gfp);
+ if (!pte)
+ goto out;
+
+- *pte = PM_LEVEL_PDE(domain->mode,
+- iommu_virt_to_phys(domain->pt_root));
+- domain->pt_root = pte;
+- domain->mode += 1;
++ *pte = PM_LEVEL_PDE(pgtable.mode, iommu_virt_to_phys(pgtable.root));
++
++ root = amd_iommu_domain_encode_pgtable(pte, pgtable.mode + 1);
++
++ atomic64_set(&domain->pt_root, root);
+
+ ret = true;
+
+@@ -1451,16 +1480,22 @@ static u64 *alloc_pte(struct protection_domain *domain,
+ gfp_t gfp,
+ bool *updated)
+ {
++ struct domain_pgtable pgtable;
+ int level, end_lvl;
+ u64 *pte, *page;
+
+ BUG_ON(!is_power_of_2(page_size));
+
+- while (address > PM_LEVEL_SIZE(domain->mode))
++ amd_iommu_domain_get_pgtable(domain, &pgtable);
++
++ while (address > PM_LEVEL_SIZE(pgtable.mode)) {
+ *updated = increase_address_space(domain, address, gfp) || *updated;
++ amd_iommu_domain_get_pgtable(domain, &pgtable);
++ }
++
+
+- level = domain->mode - 1;
+- pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
++ level = pgtable.mode - 1;
++ pte = &pgtable.root[PM_LEVEL_INDEX(level, address)];
+ address = PAGE_SIZE_ALIGN(address, page_size);
+ end_lvl = PAGE_SIZE_LEVEL(page_size);
+
+@@ -1536,16 +1571,19 @@ static u64 *fetch_pte(struct protection_domain *domain,
+ unsigned long address,
+ unsigned long *page_size)
+ {
++ struct domain_pgtable pgtable;
+ int level;
+ u64 *pte;
+
+ *page_size = 0;
+
+- if (address > PM_LEVEL_SIZE(domain->mode))
++ amd_iommu_domain_get_pgtable(domain, &pgtable);
++
++ if (address > PM_LEVEL_SIZE(pgtable.mode))
+ return NULL;
+
+- level = domain->mode - 1;
+- pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
++ level = pgtable.mode - 1;
++ pte = &pgtable.root[PM_LEVEL_INDEX(level, address)];
+ *page_size = PTE_LEVEL_PAGE_SIZE(level);
+
+ while (level > 0) {
+@@ -1806,6 +1844,7 @@ static void dma_ops_domain_free(struct protection_domain *domain)
+ static struct protection_domain *dma_ops_domain_alloc(void)
+ {
+ struct protection_domain *domain;
++ u64 *pt_root, root;
+
+ domain = kzalloc(sizeof(struct protection_domain), GFP_KERNEL);
+ if (!domain)
+@@ -1814,12 +1853,14 @@ static struct protection_domain *dma_ops_domain_alloc(void)
+ if (protection_domain_init(domain))
+ goto free_domain;
+
+- domain->mode = PAGE_MODE_3_LEVEL;
+- domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
+- domain->flags = PD_DMA_OPS_MASK;
+- if (!domain->pt_root)
++ pt_root = (void *)get_zeroed_page(GFP_KERNEL);
++ if (!pt_root)
+ goto free_domain;
+
++ root = amd_iommu_domain_encode_pgtable(pt_root, PAGE_MODE_3_LEVEL);
++ atomic64_set(&domain->pt_root, root);
++ domain->flags = PD_DMA_OPS_MASK;
++
+ if (iommu_get_dma_cookie(&domain->domain) == -ENOMEM)
+ goto free_domain;
+
+@@ -1843,14 +1884,17 @@ static bool dma_ops_domain(struct protection_domain *domain)
+ static void set_dte_entry(u16 devid, struct protection_domain *domain,
+ bool ats, bool ppr)
+ {
++ struct domain_pgtable pgtable;
+ u64 pte_root = 0;
+ u64 flags = 0;
+ u32 old_domid;
+
+- if (domain->mode != PAGE_MODE_NONE)
+- pte_root = iommu_virt_to_phys(domain->pt_root);
++ amd_iommu_domain_get_pgtable(domain, &pgtable);
++
++ if (pgtable.mode != PAGE_MODE_NONE)
++ pte_root = iommu_virt_to_phys(pgtable.root);
+
+- pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
++ pte_root |= (pgtable.mode & DEV_ENTRY_MODE_MASK)
+ << DEV_ENTRY_MODE_SHIFT;
+ pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V | DTE_FLAG_TV;
+
+@@ -2375,6 +2419,7 @@ static struct protection_domain *protection_domain_alloc(void)
+ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
+ {
+ struct protection_domain *pdomain;
++ u64 *pt_root, root;
+
+ switch (type) {
+ case IOMMU_DOMAIN_UNMANAGED:
+@@ -2382,13 +2427,15 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
+ if (!pdomain)
+ return NULL;
+
+- pdomain->mode = PAGE_MODE_3_LEVEL;
+- pdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
+- if (!pdomain->pt_root) {
++ pt_root = (void *)get_zeroed_page(GFP_KERNEL);
++ if (!pt_root) {
+ protection_domain_free(pdomain);
+ return NULL;
+ }
+
++ root = amd_iommu_domain_encode_pgtable(pt_root, PAGE_MODE_3_LEVEL);
++ atomic64_set(&pdomain->pt_root, root);
++
+ pdomain->domain.geometry.aperture_start = 0;
+ pdomain->domain.geometry.aperture_end = ~0ULL;
+ pdomain->domain.geometry.force_aperture = true;
+@@ -2406,7 +2453,7 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
+ if (!pdomain)
+ return NULL;
+
+- pdomain->mode = PAGE_MODE_NONE;
++ atomic64_set(&pdomain->pt_root, PAGE_MODE_NONE);
+ break;
+ default:
+ return NULL;
+@@ -2418,6 +2465,7 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
+ static void amd_iommu_domain_free(struct iommu_domain *dom)
+ {
+ struct protection_domain *domain;
++ struct domain_pgtable pgtable;
+
+ domain = to_pdomain(dom);
+
+@@ -2435,7 +2483,9 @@ static void amd_iommu_domain_free(struct iommu_domain *dom)
+ dma_ops_domain_free(domain);
+ break;
+ default:
+- if (domain->mode != PAGE_MODE_NONE)
++ amd_iommu_domain_get_pgtable(domain, &pgtable);
++
++ if (pgtable.mode != PAGE_MODE_NONE)
+ free_pagetable(domain);
+
+ if (domain->flags & PD_IOMMUV2_MASK)
+@@ -2518,10 +2568,12 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
+ gfp_t gfp)
+ {
+ struct protection_domain *domain = to_pdomain(dom);
++ struct domain_pgtable pgtable;
+ int prot = 0;
+ int ret;
+
+- if (domain->mode == PAGE_MODE_NONE)
++ amd_iommu_domain_get_pgtable(domain, &pgtable);
++ if (pgtable.mode == PAGE_MODE_NONE)
+ return -EINVAL;
+
+ if (iommu_prot & IOMMU_READ)
+@@ -2541,8 +2593,10 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
+ struct iommu_iotlb_gather *gather)
+ {
+ struct protection_domain *domain = to_pdomain(dom);
++ struct domain_pgtable pgtable;
+
+- if (domain->mode == PAGE_MODE_NONE)
++ amd_iommu_domain_get_pgtable(domain, &pgtable);
++ if (pgtable.mode == PAGE_MODE_NONE)
+ return 0;
+
+ return iommu_unmap_page(domain, iova, page_size);
+@@ -2553,9 +2607,11 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
+ {
+ struct protection_domain *domain = to_pdomain(dom);
+ unsigned long offset_mask, pte_pgsize;
++ struct domain_pgtable pgtable;
+ u64 *pte, __pte;
+
+- if (domain->mode == PAGE_MODE_NONE)
++ amd_iommu_domain_get_pgtable(domain, &pgtable);
++ if (pgtable.mode == PAGE_MODE_NONE)
+ return iova;
+
+ pte = fetch_pte(domain, iova, &pte_pgsize);
+@@ -2708,16 +2764,26 @@ EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
+ void amd_iommu_domain_direct_map(struct iommu_domain *dom)
+ {
+ struct protection_domain *domain = to_pdomain(dom);
++ struct domain_pgtable pgtable;
+ unsigned long flags;
++ u64 pt_root;
+
+ spin_lock_irqsave(&domain->lock, flags);
+
++	/* First save pgtable configuration */
++ amd_iommu_domain_get_pgtable(domain, &pgtable);
++
+ /* Update data structure */
+- domain->mode = PAGE_MODE_NONE;
++ pt_root = amd_iommu_domain_encode_pgtable(NULL, PAGE_MODE_NONE);
++ atomic64_set(&domain->pt_root, pt_root);
+
+ /* Make changes visible to IOMMUs */
+ update_domain(domain);
+
++	/* Restore old pgtable in domain->pt_root to free page-table */
++ pt_root = amd_iommu_domain_encode_pgtable(pgtable.root, pgtable.mode);
++ atomic64_set(&domain->pt_root, pt_root);
++
+ /* Page-table is not visible to IOMMU anymore, so free it */
+ free_pagetable(domain);
+
+@@ -2908,9 +2974,11 @@ static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
+ static int __set_gcr3(struct protection_domain *domain, int pasid,
+ unsigned long cr3)
+ {
++ struct domain_pgtable pgtable;
+ u64 *pte;
+
+- if (domain->mode != PAGE_MODE_NONE)
++ amd_iommu_domain_get_pgtable(domain, &pgtable);
++ if (pgtable.mode != PAGE_MODE_NONE)
+ return -EINVAL;
+
+ pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
+@@ -2924,9 +2992,11 @@ static int __set_gcr3(struct protection_domain *domain, int pasid,
+
+ static int __clear_gcr3(struct protection_domain *domain, int pasid)
+ {
++ struct domain_pgtable pgtable;
+ u64 *pte;
+
+- if (domain->mode != PAGE_MODE_NONE)
++ amd_iommu_domain_get_pgtable(domain, &pgtable);
++ if (pgtable.mode != PAGE_MODE_NONE)
+ return -EINVAL;
+
+ pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
+diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
+index ca8c4522045b3..7a8fdec138bd1 100644
+--- a/drivers/iommu/amd_iommu_types.h
++++ b/drivers/iommu/amd_iommu_types.h
+@@ -468,8 +468,7 @@ struct protection_domain {
+ iommu core code */
+ spinlock_t lock; /* mostly used to lock the page table*/
+ u16 id; /* the domain id written to the device table */
+- int mode; /* paging mode (0-6 levels) */
+- u64 *pt_root; /* page table root pointer */
++ atomic64_t pt_root; /* pgtable root and pgtable mode */
+ int glx; /* Number of levels for GCR3 table */
+ u64 *gcr3_tbl; /* Guest CR3 table */
+ unsigned long flags; /* flags to find out type of domain */
+@@ -477,6 +476,12 @@ struct protection_domain {
+ unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
+ };
+
++/* For decoded pt_root */
++struct domain_pgtable {
++ int mode;
++ u64 *root;
++};
++
+ /*
+ * Structure where we save information about one hardware AMD IOMMU in the
+ * system.
+--
+2.20.1
+
--- /dev/null
+From cf6f0b2a460992f6cf2a151842a24e3fd3700374 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 May 2020 14:54:12 +0200
+Subject: iommu/amd: Update Device Table in increase_address_space()
+
+From: Joerg Roedel <jroedel@suse.de>
+
+[ Upstream commit 19c6978fba68a2cdedee7d55fb8c3063d47982d9 ]
+
+The Device Table needs to be updated before the new page-table root
+can be published in domain->pt_root. Otherwise a concurrent call to
+fetch_pte might fetch a PTE which is not reachable through the Device
+Table Entry.
+
+Fixes: 92d420ec028d ("iommu/amd: Relax locking in dma_ops path")
+Reported-by: Qian Cai <cai@lca.pw>
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Tested-by: Qian Cai <cai@lca.pw>
+Link: https://lore.kernel.org/r/20200504125413.16798-5-joro@8bytes.org
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/amd_iommu.c | 49 ++++++++++++++++++++++++++++-----------
+ 1 file changed, 36 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 28229a38af4d2..500d0a8c966fc 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -101,6 +101,8 @@ struct kmem_cache *amd_iommu_irq_cache;
+ static void update_domain(struct protection_domain *domain);
+ static int protection_domain_init(struct protection_domain *domain);
+ static void detach_device(struct device *dev);
++static void update_and_flush_device_table(struct protection_domain *domain,
++ struct domain_pgtable *pgtable);
+
+ /****************************************************************************
+ *
+@@ -1461,8 +1463,16 @@ static bool increase_address_space(struct protection_domain *domain,
+
+ *pte = PM_LEVEL_PDE(pgtable.mode, iommu_virt_to_phys(pgtable.root));
+
+- root = amd_iommu_domain_encode_pgtable(pte, pgtable.mode + 1);
++ pgtable.root = pte;
++ pgtable.mode += 1;
++ update_and_flush_device_table(domain, &pgtable);
++ domain_flush_complete(domain);
+
++ /*
++ * Device Table needs to be updated and flushed before the new root can
++ * be published.
++ */
++ root = amd_iommu_domain_encode_pgtable(pte, pgtable.mode);
+ atomic64_set(&domain->pt_root, root);
+
+ ret = true;
+@@ -1882,19 +1892,17 @@ static bool dma_ops_domain(struct protection_domain *domain)
+ }
+
+ static void set_dte_entry(u16 devid, struct protection_domain *domain,
++ struct domain_pgtable *pgtable,
+ bool ats, bool ppr)
+ {
+- struct domain_pgtable pgtable;
+ u64 pte_root = 0;
+ u64 flags = 0;
+ u32 old_domid;
+
+- amd_iommu_domain_get_pgtable(domain, &pgtable);
++ if (pgtable->mode != PAGE_MODE_NONE)
++ pte_root = iommu_virt_to_phys(pgtable->root);
+
+- if (pgtable.mode != PAGE_MODE_NONE)
+- pte_root = iommu_virt_to_phys(pgtable.root);
+-
+- pte_root |= (pgtable.mode & DEV_ENTRY_MODE_MASK)
++ pte_root |= (pgtable->mode & DEV_ENTRY_MODE_MASK)
+ << DEV_ENTRY_MODE_SHIFT;
+ pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V | DTE_FLAG_TV;
+
+@@ -1967,6 +1975,7 @@ static void clear_dte_entry(u16 devid)
+ static void do_attach(struct iommu_dev_data *dev_data,
+ struct protection_domain *domain)
+ {
++ struct domain_pgtable pgtable;
+ struct amd_iommu *iommu;
+ bool ats;
+
+@@ -1982,7 +1991,9 @@ static void do_attach(struct iommu_dev_data *dev_data,
+ domain->dev_cnt += 1;
+
+ /* Update device table */
+- set_dte_entry(dev_data->devid, domain, ats, dev_data->iommu_v2);
++ amd_iommu_domain_get_pgtable(domain, &pgtable);
++ set_dte_entry(dev_data->devid, domain, &pgtable,
++ ats, dev_data->iommu_v2);
+ clone_aliases(dev_data->pdev);
+
+ device_flush_dte(dev_data);
+@@ -2293,22 +2304,34 @@ static int amd_iommu_domain_get_attr(struct iommu_domain *domain,
+ *
+ *****************************************************************************/
+
+-static void update_device_table(struct protection_domain *domain)
++static void update_device_table(struct protection_domain *domain,
++ struct domain_pgtable *pgtable)
+ {
+ struct iommu_dev_data *dev_data;
+
+ list_for_each_entry(dev_data, &domain->dev_list, list) {
+- set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled,
+- dev_data->iommu_v2);
++ set_dte_entry(dev_data->devid, domain, pgtable,
++ dev_data->ats.enabled, dev_data->iommu_v2);
+ clone_aliases(dev_data->pdev);
+ }
+ }
+
++static void update_and_flush_device_table(struct protection_domain *domain,
++ struct domain_pgtable *pgtable)
++{
++ update_device_table(domain, pgtable);
++ domain_flush_devices(domain);
++}
++
+ static void update_domain(struct protection_domain *domain)
+ {
+- update_device_table(domain);
++ struct domain_pgtable pgtable;
+
+- domain_flush_devices(domain);
++ /* Update device table */
++ amd_iommu_domain_get_pgtable(domain, &pgtable);
++ update_and_flush_device_table(domain, &pgtable);
++
++ /* Flush domain TLB(s) and wait for completion */
+ domain_flush_tlb_pde(domain);
+ }
+
+--
+2.20.1
+
--- /dev/null
+From e0c465e7126ea1c97592d8792b9c65a7893f80f4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 6 Apr 2020 16:21:20 +0100
+Subject: KVM: arm: vgic: Synchronize the whole guest on GIC{D,R}_I{S,C}ACTIVER
+ read
+
+From: Marc Zyngier <maz@kernel.org>
+
+[ Upstream commit 9a50ebbffa9862db7604345f5fd763122b0f6fed ]
+
+When a guest tries to read the active state of its interrupts,
+we currently just return whatever state we have in memory. This
+means that if such an interrupt lives in a List Register on another
+CPU, we fail to observe the latest active state for this interrupt.
+
+In order to remedy this, stop all the other vcpus so that they exit
+and we can observe the most recent value for the state. This is
+similar to what we are doing for the write side of the same
+registers, and results in new MMIO handlers for userspace (which
+do not need to stop the guest, as it is supposed to be stopped
+already).
+
+Reported-by: Julien Grall <julien@xen.org>
+Reviewed-by: Andre Przywara <andre.przywara@arm.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ virt/kvm/arm/vgic/vgic-mmio-v2.c | 4 +-
+ virt/kvm/arm/vgic/vgic-mmio-v3.c | 12 ++--
+ virt/kvm/arm/vgic/vgic-mmio.c | 100 ++++++++++++++++++++-----------
+ virt/kvm/arm/vgic/vgic-mmio.h | 3 +
+ 4 files changed, 75 insertions(+), 44 deletions(-)
+
+diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
+index 5945f062d7497..d63881f60e1a5 100644
+--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
++++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
+@@ -422,11 +422,11 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = {
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
+ vgic_mmio_read_active, vgic_mmio_write_sactive,
+- NULL, vgic_mmio_uaccess_write_sactive, 1,
++ vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 1,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
+ vgic_mmio_read_active, vgic_mmio_write_cactive,
+- NULL, vgic_mmio_uaccess_write_cactive, 1,
++ vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, 1,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
+ vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
+diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
+index ebc218840fc22..b1b066c148cee 100644
+--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
++++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
+@@ -494,11 +494,11 @@ static const struct vgic_register_region vgic_v3_dist_registers[] = {
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISACTIVER,
+ vgic_mmio_read_active, vgic_mmio_write_sactive,
+- NULL, vgic_mmio_uaccess_write_sactive, 1,
++ vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 1,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICACTIVER,
+ vgic_mmio_read_active, vgic_mmio_write_cactive,
+- NULL, vgic_mmio_uaccess_write_cactive,
++ vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive,
+ 1, VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IPRIORITYR,
+ vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
+@@ -566,12 +566,12 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = {
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISACTIVER0,
+ vgic_mmio_read_active, vgic_mmio_write_sactive,
+- NULL, vgic_mmio_uaccess_write_sactive,
+- 4, VGIC_ACCESS_32bit),
++ vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 4,
++ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICACTIVER0,
+ vgic_mmio_read_active, vgic_mmio_write_cactive,
+- NULL, vgic_mmio_uaccess_write_cactive,
+- 4, VGIC_ACCESS_32bit),
++ vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, 4,
++ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IPRIORITYR0,
+ vgic_mmio_read_priority, vgic_mmio_write_priority, 32,
+ VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
+diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
+index e7abd05ea8964..f659654b09a83 100644
+--- a/virt/kvm/arm/vgic/vgic-mmio.c
++++ b/virt/kvm/arm/vgic/vgic-mmio.c
+@@ -279,8 +279,39 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
+ }
+ }
+
+-unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
+- gpa_t addr, unsigned int len)
++
++/*
++ * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
++ * is not queued on some running VCPU's LRs, because then the change to the
++ * active state can be overwritten when the VCPU's state is synced coming back
++ * from the guest.
++ *
++ * For shared interrupts as well as GICv3 private interrupts, we have to
++ * stop all the VCPUs because interrupts can be migrated while we don't hold
++ * the IRQ locks and we don't want to be chasing moving targets.
++ *
++ * For GICv2 private interrupts we don't have to do anything because
++ * userspace accesses to the VGIC state already require all VCPUs to be
++ * stopped, and only the VCPU itself can modify its private interrupts
++ * active state, which guarantees that the VCPU is not running.
++ */
++static void vgic_access_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
++{
++ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
++ intid >= VGIC_NR_PRIVATE_IRQS)
++ kvm_arm_halt_guest(vcpu->kvm);
++}
++
++/* See vgic_access_active_prepare */
++static void vgic_access_active_finish(struct kvm_vcpu *vcpu, u32 intid)
++{
++ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
++ intid >= VGIC_NR_PRIVATE_IRQS)
++ kvm_arm_resume_guest(vcpu->kvm);
++}
++
++static unsigned long __vgic_mmio_read_active(struct kvm_vcpu *vcpu,
++ gpa_t addr, unsigned int len)
+ {
+ u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+ u32 value = 0;
+@@ -290,6 +321,10 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
+ for (i = 0; i < len * 8; i++) {
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
++ /*
++ * Even for HW interrupts, don't evaluate the HW state as
++ * all the guest is interested in is the virtual state.
++ */
+ if (irq->active)
+ value |= (1U << i);
+
+@@ -299,6 +334,29 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
+ return value;
+ }
+
++unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
++ gpa_t addr, unsigned int len)
++{
++ u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
++ u32 val;
++
++ mutex_lock(&vcpu->kvm->lock);
++ vgic_access_active_prepare(vcpu, intid);
++
++ val = __vgic_mmio_read_active(vcpu, addr, len);
++
++ vgic_access_active_finish(vcpu, intid);
++ mutex_unlock(&vcpu->kvm->lock);
++
++ return val;
++}
++
++unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
++ gpa_t addr, unsigned int len)
++{
++ return __vgic_mmio_read_active(vcpu, addr, len);
++}
++
+ /* Must be called with irq->irq_lock held */
+ static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
+ bool active, bool is_uaccess)
+@@ -350,36 +408,6 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+ }
+
+-/*
+- * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
+- * is not queued on some running VCPU's LRs, because then the change to the
+- * active state can be overwritten when the VCPU's state is synced coming back
+- * from the guest.
+- *
+- * For shared interrupts, we have to stop all the VCPUs because interrupts can
+- * be migrated while we don't hold the IRQ locks and we don't want to be
+- * chasing moving targets.
+- *
+- * For private interrupts we don't have to do anything because userspace
+- * accesses to the VGIC state already require all VCPUs to be stopped, and
+- * only the VCPU itself can modify its private interrupts active state, which
+- * guarantees that the VCPU is not running.
+- */
+-static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
+-{
+- if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
+- intid >= VGIC_NR_PRIVATE_IRQS)
+- kvm_arm_halt_guest(vcpu->kvm);
+-}
+-
+-/* See vgic_change_active_prepare */
+-static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
+-{
+- if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
+- intid >= VGIC_NR_PRIVATE_IRQS)
+- kvm_arm_resume_guest(vcpu->kvm);
+-}
+-
+ static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+@@ -401,11 +429,11 @@ void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
+ u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+
+ mutex_lock(&vcpu->kvm->lock);
+- vgic_change_active_prepare(vcpu, intid);
++ vgic_access_active_prepare(vcpu, intid);
+
+ __vgic_mmio_write_cactive(vcpu, addr, len, val);
+
+- vgic_change_active_finish(vcpu, intid);
++ vgic_access_active_finish(vcpu, intid);
+ mutex_unlock(&vcpu->kvm->lock);
+ }
+
+@@ -438,11 +466,11 @@ void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
+ u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+
+ mutex_lock(&vcpu->kvm->lock);
+- vgic_change_active_prepare(vcpu, intid);
++ vgic_access_active_prepare(vcpu, intid);
+
+ __vgic_mmio_write_sactive(vcpu, addr, len, val);
+
+- vgic_change_active_finish(vcpu, intid);
++ vgic_access_active_finish(vcpu, intid);
+ mutex_unlock(&vcpu->kvm->lock);
+ }
+
+diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h
+index 5af2aefad4359..30713a44e3faa 100644
+--- a/virt/kvm/arm/vgic/vgic-mmio.h
++++ b/virt/kvm/arm/vgic/vgic-mmio.h
+@@ -152,6 +152,9 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
+ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len);
+
++unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
++ gpa_t addr, unsigned int len);
++
+ void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val);
+--
+2.20.1
+
--- /dev/null
+From f3c23a79d60434424a9352137f5993da3139503b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 Apr 2020 13:05:26 +0100
+Subject: KVM: arm: vgic-v2: Only use the virtual state when userspace accesses
+ pending bits
+
+From: Marc Zyngier <maz@kernel.org>
+
+[ Upstream commit ba1ed9e17b581c9a204ec1d72d40472dd8557edd ]
+
+There is no point in accessing the HW when writing to any of the
+ISPENDR/ICPENDR registers from userspace, as only the guest should
+be allowed to change the HW state.
+
+Introduce new userspace-specific accessors that deal solely with
+the virtual state. Note that the API differs from that of GICv3,
+where userspace exclusively uses ISPENDR to set the state. Too
+bad we can't reuse it.
+
+Fixes: 82e40f558de56 ("KVM: arm/arm64: vgic-v2: Handle SGI bits in GICD_I{S,C}PENDR0 as WI")
+Reviewed-by: James Morse <james.morse@arm.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ virt/kvm/arm/vgic/vgic-mmio-v2.c | 6 ++-
+ virt/kvm/arm/vgic/vgic-mmio.c | 87 ++++++++++++++++++++++++--------
+ virt/kvm/arm/vgic/vgic-mmio.h | 8 +++
+ 3 files changed, 77 insertions(+), 24 deletions(-)
+
+diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
+index d63881f60e1a5..7b288eb391b84 100644
+--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
++++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
+@@ -415,10 +415,12 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = {
+ vgic_mmio_read_enable, vgic_mmio_write_cenable, NULL, NULL, 1,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
+- vgic_mmio_read_pending, vgic_mmio_write_spending, NULL, NULL, 1,
++ vgic_mmio_read_pending, vgic_mmio_write_spending,
++ NULL, vgic_uaccess_write_spending, 1,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR,
+- vgic_mmio_read_pending, vgic_mmio_write_cpending, NULL, NULL, 1,
++ vgic_mmio_read_pending, vgic_mmio_write_cpending,
++ NULL, vgic_uaccess_write_cpending, 1,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
+ vgic_mmio_read_active, vgic_mmio_write_sactive,
+diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
+index f659654b09a83..b6824bba8248b 100644
+--- a/virt/kvm/arm/vgic/vgic-mmio.c
++++ b/virt/kvm/arm/vgic/vgic-mmio.c
+@@ -179,17 +179,6 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
+ return value;
+ }
+
+-/* Must be called with irq->irq_lock held */
+-static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
+- bool is_uaccess)
+-{
+- if (is_uaccess)
+- return;
+-
+- irq->pending_latch = true;
+- vgic_irq_set_phys_active(irq, true);
+-}
+-
+ static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
+ {
+ return (vgic_irq_is_sgi(irq->intid) &&
+@@ -200,7 +189,6 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+ {
+- bool is_uaccess = !kvm_get_running_vcpu();
+ u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+ int i;
+ unsigned long flags;
+@@ -215,22 +203,49 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
+ }
+
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
++
++ irq->pending_latch = true;
+ if (irq->hw)
+- vgic_hw_irq_spending(vcpu, irq, is_uaccess);
+- else
+- irq->pending_latch = true;
++ vgic_irq_set_phys_active(irq, true);
++
+ vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+ vgic_put_irq(vcpu->kvm, irq);
+ }
+ }
+
+-/* Must be called with irq->irq_lock held */
+-static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
+- bool is_uaccess)
++int vgic_uaccess_write_spending(struct kvm_vcpu *vcpu,
++ gpa_t addr, unsigned int len,
++ unsigned long val)
+ {
+- if (is_uaccess)
+- return;
++ u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
++ int i;
++ unsigned long flags;
++
++ for_each_set_bit(i, &val, len * 8) {
++ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
++
++ raw_spin_lock_irqsave(&irq->irq_lock, flags);
++ irq->pending_latch = true;
++
++ /*
++ * GICv2 SGIs are terribly broken. We can't restore
++ * the source of the interrupt, so just pick the vcpu
++ * itself as the source...
++ */
++ if (is_vgic_v2_sgi(vcpu, irq))
++ irq->source |= BIT(vcpu->vcpu_id);
++
++ vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
++
++ vgic_put_irq(vcpu->kvm, irq);
++ }
+
++ return 0;
++}
++
++/* Must be called with irq->irq_lock held */
++static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
++{
+ irq->pending_latch = false;
+
+ /*
+@@ -253,7 +268,6 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+ {
+- bool is_uaccess = !kvm_get_running_vcpu();
+ u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+ int i;
+ unsigned long flags;
+@@ -270,7 +284,7 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
+
+ if (irq->hw)
+- vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
++ vgic_hw_irq_cpending(vcpu, irq);
+ else
+ irq->pending_latch = false;
+
+@@ -279,6 +293,35 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
+ }
+ }
+
++int vgic_uaccess_write_cpending(struct kvm_vcpu *vcpu,
++ gpa_t addr, unsigned int len,
++ unsigned long val)
++{
++ u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
++ int i;
++ unsigned long flags;
++
++ for_each_set_bit(i, &val, len * 8) {
++ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
++
++ raw_spin_lock_irqsave(&irq->irq_lock, flags);
++ /*
++ * More fun with GICv2 SGIs! If we're clearing one of them
++ * from userspace, which source vcpu to clear? Let's not
++ * even think of it, and blow the whole set.
++ */
++ if (is_vgic_v2_sgi(vcpu, irq))
++ irq->source = 0;
++
++ irq->pending_latch = false;
++
++ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
++
++ vgic_put_irq(vcpu->kvm, irq);
++ }
++
++ return 0;
++}
+
+ /*
+ * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
+diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h
+index 30713a44e3faa..b127f889113ed 100644
+--- a/virt/kvm/arm/vgic/vgic-mmio.h
++++ b/virt/kvm/arm/vgic/vgic-mmio.h
+@@ -149,6 +149,14 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val);
+
++int vgic_uaccess_write_spending(struct kvm_vcpu *vcpu,
++ gpa_t addr, unsigned int len,
++ unsigned long val);
++
++int vgic_uaccess_write_cpending(struct kvm_vcpu *vcpu,
++ gpa_t addr, unsigned int len,
++ unsigned long val);
++
+ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len);
+
+--
+2.20.1
+
--- /dev/null
+From 93f0534f01401ea1cdd7ae6a5706c766b0d4b4e5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Feb 2020 12:27:44 -0800
+Subject: KVM: nVMX: Consolidate nested MTF checks to helper function
+
+From: Oliver Upton <oupton@google.com>
+
+[ Upstream commit 212617dbb6bac2a21dec6ef7d6012d96bb6dbb5d ]
+
+commit 5ef8acbdd687 ("KVM: nVMX: Emulate MTF when performing
+instruction emulation") introduced a helper to check the MTF
+VM-execution control in vmcs12. Change pre-existing check in
+nested_vmx_exit_reflected() to instead use the helper.
+
+Signed-off-by: Oliver Upton <oupton@google.com>
+Reviewed-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
+Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/vmx/nested.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index eec7b2d93104c..b773989308015 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -5618,7 +5618,7 @@ bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
+ case EXIT_REASON_MWAIT_INSTRUCTION:
+ return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
+ case EXIT_REASON_MONITOR_TRAP_FLAG:
+- return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
++ return nested_cpu_has_mtf(vmcs12);
+ case EXIT_REASON_MONITOR_INSTRUCTION:
+ return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
+ case EXIT_REASON_PAUSE_INSTRUCTION:
+--
+2.20.1
+
--- /dev/null
+From c4914b12ffe2715c5af0ef4ba1024988b9d65e6f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Apr 2020 22:47:45 +0000
+Subject: kvm: nVMX: reflect MTF VM-exits if injected by L1
+
+From: Oliver Upton <oupton@google.com>
+
+[ Upstream commit b045ae906b42afb361dc7ecf1a3cea110fb0a65f ]
+
+According to SDM 26.6.2, it is possible to inject an MTF VM-exit via the
+VM-entry interruption-information field regardless of the 'monitor trap
+flag' VM-execution control. KVM appropriately copies the VM-entry
+interruption-information field from vmcs12 to vmcs02. However, if L1
+has not set the 'monitor trap flag' VM-execution control, KVM fails to
+reflect the subsequent MTF VM-exit into L1.
+
+Fix this by consulting the VM-entry interruption-information field of
+vmcs12 to determine if L1 has injected the MTF VM-exit. If so, reflect
+the exit, regardless of the 'monitor trap flag' VM-execution control.
+
+Fixes: 5f3d45e7f282 ("kvm/x86: add support for MONITOR_TRAP_FLAG")
+Signed-off-by: Oliver Upton <oupton@google.com>
+Reviewed-by: Peter Shier <pshier@google.com>
+Reviewed-by: Jim Mattson <jmattson@google.com>
+Message-Id: <20200414224746.240324-1-oupton@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/vmx/nested.c | 19 ++++++++++++++++++-
+ 1 file changed, 18 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index b773989308015..3a2f05ef51fa4 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -5504,6 +5504,23 @@ static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
+ return 1 & (b >> (field & 7));
+ }
+
++static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12)
++{
++ u32 entry_intr_info = vmcs12->vm_entry_intr_info_field;
++
++ if (nested_cpu_has_mtf(vmcs12))
++ return true;
++
++ /*
++ * An MTF VM-exit may be injected into the guest by setting the
++ * interruption-type to 7 (other event) and the vector field to 0. Such
++ * is the case regardless of the 'monitor trap flag' VM-execution
++ * control.
++ */
++ return entry_intr_info == (INTR_INFO_VALID_MASK
++ | INTR_TYPE_OTHER_EVENT);
++}
++
+ /*
+ * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we
+ * should handle it ourselves in L0 (and then continue L2). Only call this
+@@ -5618,7 +5635,7 @@ bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
+ case EXIT_REASON_MWAIT_INSTRUCTION:
+ return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
+ case EXIT_REASON_MONITOR_TRAP_FLAG:
+- return nested_cpu_has_mtf(vmcs12);
++ return nested_vmx_exit_handled_mtf(vmcs12);
+ case EXIT_REASON_MONITOR_INSTRUCTION:
+ return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
+ case EXIT_REASON_PAUSE_INSTRUCTION:
+--
+2.20.1
+
--- /dev/null
+From 2dc10caab9c946258295051933cc1a13a3201cd1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 May 2020 01:20:26 +0300
+Subject: net: dsa: ocelot: the MAC table on Felix is twice as large
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit 21ce7f3e16fbf89faaf149cfe0f730edfc553914 ]
+
+When running 'bridge fdb dump' on Felix, sometimes learnt and static MAC
+addresses would appear, sometimes they wouldn't.
+
+Turns out, the MAC table has 4096 entries on VSC7514 (Ocelot) and 8192
+entries on VSC9959 (Felix), so the existing code from the Ocelot common
+library only dumped half of Felix's MAC table. They are both organized
+as a 4-way set-associative TCAM, so we just need a single variable
+indicating the correct number of rows.
+
+Fixes: 56051948773e ("net: dsa: ocelot: add driver for Felix switch family")
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/ocelot/felix.c | 1 +
+ drivers/net/dsa/ocelot/felix.h | 1 +
+ drivers/net/dsa/ocelot/felix_vsc9959.c | 1 +
+ drivers/net/ethernet/mscc/ocelot.c | 6 ++----
+ drivers/net/ethernet/mscc/ocelot_regs.c | 1 +
+ include/soc/mscc/ocelot.h | 1 +
+ 6 files changed, 7 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
+index 9e895ab586d5a..a7780c06fa65b 100644
+--- a/drivers/net/dsa/ocelot/felix.c
++++ b/drivers/net/dsa/ocelot/felix.c
+@@ -397,6 +397,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
+ ocelot->stats_layout = felix->info->stats_layout;
+ ocelot->num_stats = felix->info->num_stats;
+ ocelot->shared_queue_sz = felix->info->shared_queue_sz;
++ ocelot->num_mact_rows = felix->info->num_mact_rows;
+ ocelot->ops = felix->info->ops;
+
+ port_phy_modes = kcalloc(num_phys_ports, sizeof(phy_interface_t),
+diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
+index 3a7580015b621..8771d40324f10 100644
+--- a/drivers/net/dsa/ocelot/felix.h
++++ b/drivers/net/dsa/ocelot/felix.h
+@@ -15,6 +15,7 @@ struct felix_info {
+ const u32 *const *map;
+ const struct ocelot_ops *ops;
+ int shared_queue_sz;
++ int num_mact_rows;
+ const struct ocelot_stat_layout *stats_layout;
+ unsigned int num_stats;
+ int num_ports;
+diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
+index 2c812b481778c..edc1a67c002b6 100644
+--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
++++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
+@@ -1090,6 +1090,7 @@ struct felix_info felix_info_vsc9959 = {
+ .stats_layout = vsc9959_stats_layout,
+ .num_stats = ARRAY_SIZE(vsc9959_stats_layout),
+ .shared_queue_sz = 128 * 1024,
++ .num_mact_rows = 2048,
+ .num_ports = 6,
+ .switch_pci_bar = 4,
+ .imdio_pci_bar = 0,
+diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
+index b14286dc49fb5..33ef8690eafe9 100644
+--- a/drivers/net/ethernet/mscc/ocelot.c
++++ b/drivers/net/ethernet/mscc/ocelot.c
+@@ -1016,10 +1016,8 @@ int ocelot_fdb_dump(struct ocelot *ocelot, int port,
+ {
+ int i, j;
+
+- /* Loop through all the mac tables entries. There are 1024 rows of 4
+- * entries.
+- */
+- for (i = 0; i < 1024; i++) {
++ /* Loop through all the mac tables entries. */
++ for (i = 0; i < ocelot->num_mact_rows; i++) {
+ for (j = 0; j < 4; j++) {
+ struct ocelot_mact_entry entry;
+ bool is_static;
+diff --git a/drivers/net/ethernet/mscc/ocelot_regs.c b/drivers/net/ethernet/mscc/ocelot_regs.c
+index b88b5899b2273..7d4fd1b6addaf 100644
+--- a/drivers/net/ethernet/mscc/ocelot_regs.c
++++ b/drivers/net/ethernet/mscc/ocelot_regs.c
+@@ -431,6 +431,7 @@ int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops)
+ ocelot->stats_layout = ocelot_stats_layout;
+ ocelot->num_stats = ARRAY_SIZE(ocelot_stats_layout);
+ ocelot->shared_queue_sz = 224 * 1024;
++ ocelot->num_mact_rows = 1024;
+ ocelot->ops = ops;
+
+ ret = ocelot_regfields_init(ocelot, ocelot_regfields);
+diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
+index f8e1955c86f14..7b5382e10bd29 100644
+--- a/include/soc/mscc/ocelot.h
++++ b/include/soc/mscc/ocelot.h
+@@ -437,6 +437,7 @@ struct ocelot {
+ unsigned int num_stats;
+
+ int shared_queue_sz;
++ int num_mact_rows;
+
+ struct net_device *hw_bridge_dev;
+ u16 bridge_mask;
+--
+2.20.1
+
--- /dev/null
+From de35f6f92b03c5b9586bb7fd63b906045e12ee81 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 Apr 2020 00:59:00 -0700
+Subject: net: Make PTP-specific drivers depend on PTP_1588_CLOCK
+
+From: Clay McClure <clay@daemons.net>
+
+[ Upstream commit b6d49cab44b567b3e0a5544b3d61e516a7355fad ]
+
+Commit d1cbfd771ce8 ("ptp_clock: Allow for it to be optional") changed
+all PTP-capable Ethernet drivers from `select PTP_1588_CLOCK` to `imply
+PTP_1588_CLOCK`, "in order to break the hard dependency between the PTP
+clock subsystem and ethernet drivers capable of being clock providers."
+As a result it is possible to build PTP-capable Ethernet drivers without
+the PTP subsystem by deselecting PTP_1588_CLOCK. Drivers are required to
+handle the missing dependency gracefully.
+
+Some PTP-capable Ethernet drivers (e.g., TI_CPSW) factor their PTP code
+out into separate drivers (e.g., TI_CPTS_MOD). The above commit also
+changed these PTP-specific drivers to `imply PTP_1588_CLOCK`, making it
+possible to build them without the PTP subsystem. But as Grygorii
+Strashko noted in [1]:
+
+On Wed, Apr 22, 2020 at 02:16:11PM +0300, Grygorii Strashko wrote:
+
+> Another question is that CPTS completely nonfunctional in this case and
+> it was never expected that somebody will even try to use/run such
+> configuration (except for random build purposes).
+
+In my view, enabling a PTP-specific driver without the PTP subsystem is
+a configuration error made possible by the above commit. Kconfig should
+not allow users to create a configuration with missing dependencies that
+results in "completely nonfunctional" drivers.
+
+I audited all network drivers that call ptp_clock_register() but merely
+`imply PTP_1588_CLOCK` and found five PTP-specific drivers that are
+likely nonfunctional without PTP_1588_CLOCK:
+
+ NET_DSA_MV88E6XXX_PTP
+ NET_DSA_SJA1105_PTP
+ MACB_USE_HWSTAMP
+ CAVIUM_PTP
+ TI_CPTS_MOD
+
+Note how these symbols all reference PTP or timestamping in their name;
+this is a clue that they depend on PTP_1588_CLOCK.
+
+Change them from `imply PTP_1588_CLOCK` [2] to `depends on PTP_1588_CLOCK`.
+I'm not using `select PTP_1588_CLOCK` here because PTP_1588_CLOCK has
+its own dependencies, which `select` would not transitively apply.
+
+Additionally, remove the `select NET_PTP_CLASSIFY` from TI_CPTS_MOD;
+PTP_1588_CLOCK already selects that.
+
+[1]: https://lore.kernel.org/lkml/c04458ed-29ee-1797-3a11-7f3f560553e6@ti.com/
+
+[2]: NET_DSA_SJA1105_PTP had never declared any type of dependency on
+PTP_1588_CLOCK (`imply` or otherwise); adding a `depends on PTP_1588_CLOCK`
+here seems appropriate.
+
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: Richard Cochran <richardcochran@gmail.com>
+Cc: Nicolas Pitre <nico@fluxnic.net>
+Cc: Grygorii Strashko <grygorii.strashko@ti.com>
+Cc: Geert Uytterhoeven <geert@linux-m68k.org>
+Fixes: d1cbfd771ce8 ("ptp_clock: Allow for it to be optional")
+Signed-off-by: Clay McClure <clay@daemons.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/mv88e6xxx/Kconfig | 2 +-
+ drivers/net/dsa/sja1105/Kconfig | 1 +
+ drivers/net/ethernet/cadence/Kconfig | 2 +-
+ drivers/net/ethernet/cavium/Kconfig | 2 +-
+ drivers/net/ethernet/ti/Kconfig | 3 +--
+ 5 files changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/dsa/mv88e6xxx/Kconfig b/drivers/net/dsa/mv88e6xxx/Kconfig
+index 6435020d690dd..51185e4d7d15e 100644
+--- a/drivers/net/dsa/mv88e6xxx/Kconfig
++++ b/drivers/net/dsa/mv88e6xxx/Kconfig
+@@ -24,8 +24,8 @@ config NET_DSA_MV88E6XXX_PTP
+ bool "PTP support for Marvell 88E6xxx"
+ default n
+ depends on NET_DSA_MV88E6XXX_GLOBAL2
++ depends on PTP_1588_CLOCK
+ imply NETWORK_PHY_TIMESTAMPING
+- imply PTP_1588_CLOCK
+ help
+ Say Y to enable PTP hardware timestamping on Marvell 88E6xxx switch
+ chips that support it.
+diff --git a/drivers/net/dsa/sja1105/Kconfig b/drivers/net/dsa/sja1105/Kconfig
+index 0fe1ae173aa1a..68c3086af9af8 100644
+--- a/drivers/net/dsa/sja1105/Kconfig
++++ b/drivers/net/dsa/sja1105/Kconfig
+@@ -20,6 +20,7 @@ tristate "NXP SJA1105 Ethernet switch family support"
+ config NET_DSA_SJA1105_PTP
+ bool "Support for the PTP clock on the NXP SJA1105 Ethernet switch"
+ depends on NET_DSA_SJA1105
++ depends on PTP_1588_CLOCK
+ help
+ This enables support for timestamping and PTP clock manipulations in
+ the SJA1105 DSA driver.
+diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
+index 53b50c24d9c95..2c4c12b03502d 100644
+--- a/drivers/net/ethernet/cadence/Kconfig
++++ b/drivers/net/ethernet/cadence/Kconfig
+@@ -35,8 +35,8 @@ config MACB
+ config MACB_USE_HWSTAMP
+ bool "Use IEEE 1588 hwstamp"
+ depends on MACB
++ depends on PTP_1588_CLOCK
+ default y
+- imply PTP_1588_CLOCK
+ ---help---
+ Enable IEEE 1588 Precision Time Protocol (PTP) support for MACB.
+
+diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
+index 6a700d34019e3..4520e7ee00fe1 100644
+--- a/drivers/net/ethernet/cavium/Kconfig
++++ b/drivers/net/ethernet/cavium/Kconfig
+@@ -54,7 +54,7 @@ config THUNDER_NIC_RGX
+ config CAVIUM_PTP
+ tristate "Cavium PTP coprocessor as PTP clock"
+ depends on 64BIT && PCI
+- imply PTP_1588_CLOCK
++ depends on PTP_1588_CLOCK
+ ---help---
+ This driver adds support for the Precision Time Protocol Clocks and
+ Timestamping coprocessor (PTP) found on Cavium processors.
+diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
+index bf98e0fa7d8be..cd6eda83e1f8c 100644
+--- a/drivers/net/ethernet/ti/Kconfig
++++ b/drivers/net/ethernet/ti/Kconfig
+@@ -89,9 +89,8 @@ config TI_CPTS
+ config TI_CPTS_MOD
+ tristate
+ depends on TI_CPTS
++ depends on PTP_1588_CLOCK
+ default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y || TI_CPSW_SWITCHDEV=y
+- select NET_PTP_CLASSIFY
+- imply PTP_1588_CLOCK
+ default m
+
+ config TI_KEYSTONE_NETCP
+--
+2.20.1
+
--- /dev/null
+From 337bfca563ca0a2369f89f4a8cff60f01cffaa4f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 26 Apr 2020 22:59:21 +0200
+Subject: net: moxa: Fix a potential double 'free_irq()'
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit ee8d2267f0e39a1bfd95532da3a6405004114b27 ]
+
+Should an irq requested with 'devm_request_irq' be released explicitly,
+it should be done by 'devm_free_irq()', not 'free_irq()'.
+
+Fixes: 6c821bd9edc9 ("net: Add MOXA ART SoCs ethernet driver")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/moxa/moxart_ether.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
+index e1651756bf9da..f70bb81e1ed65 100644
+--- a/drivers/net/ethernet/moxa/moxart_ether.c
++++ b/drivers/net/ethernet/moxa/moxart_ether.c
+@@ -564,7 +564,7 @@ static int moxart_remove(struct platform_device *pdev)
+ struct net_device *ndev = platform_get_drvdata(pdev);
+
+ unregister_netdev(ndev);
+- free_irq(ndev->irq, ndev);
++ devm_free_irq(&pdev->dev, ndev->irq, ndev);
+ moxart_mac_free_memory(ndev);
+ free_netdev(ndev);
+
+--
+2.20.1
+
--- /dev/null
+From e62c16ec7eba96e1cc880097c473877e1bc9a3cb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 May 2020 01:20:27 +0300
+Subject: net: mscc: ocelot: ANA_AUTOAGE_AGE_PERIOD holds a value in seconds,
+ not ms
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit c0d7eccbc76115b7eb337956c03d47d6a889cf8c ]
+
+One may notice that automatically-learnt entries 'never' expire, even
+though the bridge configures the address age period at 300 seconds.
+
+Actually the value written to hardware corresponds to a time interval
+1000 times higher than intended, i.e. 83 hours.
+
+Fixes: a556c76adc05 ("net: mscc: Add initial Ocelot switch support")
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mscc/ocelot.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
+index 33ef8690eafe9..419e2ce2eac07 100644
+--- a/drivers/net/ethernet/mscc/ocelot.c
++++ b/drivers/net/ethernet/mscc/ocelot.c
+@@ -1444,8 +1444,15 @@ static void ocelot_port_attr_stp_state_set(struct ocelot *ocelot, int port,
+
+ void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs)
+ {
+- ocelot_write(ocelot, ANA_AUTOAGE_AGE_PERIOD(msecs / 2),
+- ANA_AUTOAGE);
++ unsigned int age_period = ANA_AUTOAGE_AGE_PERIOD(msecs / 2000);
++
++ /* Setting AGE_PERIOD to zero effectively disables automatic aging,
++ * which is clearly not what our intention is. So avoid that.
++ */
++ if (!age_period)
++ age_period = 1;
++
++ ocelot_rmw(ocelot, age_period, ANA_AUTOAGE_AGE_PERIOD_M, ANA_AUTOAGE);
+ }
+ EXPORT_SYMBOL(ocelot_set_ageing_time);
+
+--
+2.20.1
+
--- /dev/null
+From f90c0e4ca7b18167d6e371b362e89f7e0a6df1fd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Apr 2020 11:51:41 -0400
+Subject: net: phy: microchip_t1: add lan87xx_phy_init to initialize the
+ lan87xx phy.
+
+From: Yuiko Oshino <yuiko.oshino@microchip.com>
+
+[ Upstream commit 63edbcceef612bdd95fa28ce100460c7b79008a4 ]
+
+lan87xx_phy_init() initializes the lan87xx phy hardware
+including its TC10 Wake-up and Sleep features.
+
+Fixes: 3e50d2da5850 ("Add driver for Microchip LAN87XX T1 PHYs")
+Signed-off-by: Yuiko Oshino <yuiko.oshino@microchip.com>
+v0->v1:
+ - Add more details in the commit message and source comments.
+ - Update to the latest initialization sequences.
+ - Add access_ereg_modify_changed().
+ - Fix access_ereg() to access SMI bank correctly.
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/microchip_t1.c | 171 +++++++++++++++++++++++++++++++++
+ 1 file changed, 171 insertions(+)
+
+diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
+index 001def4509c29..fed3e395f18e1 100644
+--- a/drivers/net/phy/microchip_t1.c
++++ b/drivers/net/phy/microchip_t1.c
+@@ -3,9 +3,21 @@
+
+ #include <linux/kernel.h>
+ #include <linux/module.h>
++#include <linux/delay.h>
+ #include <linux/mii.h>
+ #include <linux/phy.h>
+
++/* External Register Control Register */
++#define LAN87XX_EXT_REG_CTL (0x14)
++#define LAN87XX_EXT_REG_CTL_RD_CTL (0x1000)
++#define LAN87XX_EXT_REG_CTL_WR_CTL (0x0800)
++
++/* External Register Read Data Register */
++#define LAN87XX_EXT_REG_RD_DATA (0x15)
++
++/* External Register Write Data Register */
++#define LAN87XX_EXT_REG_WR_DATA (0x16)
++
+ /* Interrupt Source Register */
+ #define LAN87XX_INTERRUPT_SOURCE (0x18)
+
+@@ -14,9 +26,160 @@
+ #define LAN87XX_MASK_LINK_UP (0x0004)
+ #define LAN87XX_MASK_LINK_DOWN (0x0002)
+
++/* phyaccess nested types */
++#define PHYACC_ATTR_MODE_READ 0
++#define PHYACC_ATTR_MODE_WRITE 1
++#define PHYACC_ATTR_MODE_MODIFY 2
++
++#define PHYACC_ATTR_BANK_SMI 0
++#define PHYACC_ATTR_BANK_MISC 1
++#define PHYACC_ATTR_BANK_PCS 2
++#define PHYACC_ATTR_BANK_AFE 3
++#define PHYACC_ATTR_BANK_MAX 7
++
+ #define DRIVER_AUTHOR "Nisar Sayed <nisar.sayed@microchip.com>"
+ #define DRIVER_DESC "Microchip LAN87XX T1 PHY driver"
+
++struct access_ereg_val {
++ u8 mode;
++ u8 bank;
++ u8 offset;
++ u16 val;
++ u16 mask;
++};
++
++static int access_ereg(struct phy_device *phydev, u8 mode, u8 bank,
++ u8 offset, u16 val)
++{
++ u16 ereg = 0;
++ int rc = 0;
++
++ if (mode > PHYACC_ATTR_MODE_WRITE || bank > PHYACC_ATTR_BANK_MAX)
++ return -EINVAL;
++
++ if (bank == PHYACC_ATTR_BANK_SMI) {
++ if (mode == PHYACC_ATTR_MODE_WRITE)
++ rc = phy_write(phydev, offset, val);
++ else
++ rc = phy_read(phydev, offset);
++ return rc;
++ }
++
++ if (mode == PHYACC_ATTR_MODE_WRITE) {
++ ereg = LAN87XX_EXT_REG_CTL_WR_CTL;
++ rc = phy_write(phydev, LAN87XX_EXT_REG_WR_DATA, val);
++ if (rc < 0)
++ return rc;
++ } else {
++ ereg = LAN87XX_EXT_REG_CTL_RD_CTL;
++ }
++
++ ereg |= (bank << 8) | offset;
++
++ rc = phy_write(phydev, LAN87XX_EXT_REG_CTL, ereg);
++ if (rc < 0)
++ return rc;
++
++ if (mode == PHYACC_ATTR_MODE_READ)
++ rc = phy_read(phydev, LAN87XX_EXT_REG_RD_DATA);
++
++ return rc;
++}
++
++static int access_ereg_modify_changed(struct phy_device *phydev,
++ u8 bank, u8 offset, u16 val, u16 mask)
++{
++ int new = 0, rc = 0;
++
++ if (bank > PHYACC_ATTR_BANK_MAX)
++ return -EINVAL;
++
++ rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ, bank, offset, val);
++ if (rc < 0)
++ return rc;
++
++ new = val | (rc & (mask ^ 0xFFFF));
++ rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE, bank, offset, new);
++
++ return rc;
++}
++
++static int lan87xx_phy_init(struct phy_device *phydev)
++{
++ static const struct access_ereg_val init[] = {
++ /* TX Amplitude = 5 */
++ {PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_AFE, 0x0B,
++ 0x000A, 0x001E},
++ /* Clear SMI interrupts */
++ {PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_SMI, 0x18,
++ 0, 0},
++ /* Clear MISC interrupts */
++ {PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_MISC, 0x08,
++ 0, 0},
++ /* Turn on TC10 Ring Oscillator (ROSC) */
++ {PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_MISC, 0x20,
++ 0x0020, 0x0020},
++ /* WUR Detect Length to 1.2uS, LPC Detect Length to 1.09uS */
++ {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_PCS, 0x20,
++ 0x283C, 0},
++ /* Wake_In Debounce Length to 39uS, Wake_Out Length to 79uS */
++ {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x21,
++ 0x274F, 0},
++ /* Enable Auto Wake Forward to Wake_Out, ROSC on, Sleep,
++ * and Wake_In to wake PHY
++ */
++ {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x20,
++ 0x80A7, 0},
++ /* Enable WUP Auto Fwd, Enable Wake on MDI, Wakeup Debouncer
++ * to 128 uS
++ */
++ {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x24,
++ 0xF110, 0},
++ /* Enable HW Init */
++ {PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_SMI, 0x1A,
++ 0x0100, 0x0100},
++ };
++ int rc, i;
++
++ /* Start manual initialization procedures in Managed Mode */
++ rc = access_ereg_modify_changed(phydev, PHYACC_ATTR_BANK_SMI,
++ 0x1a, 0x0000, 0x0100);
++ if (rc < 0)
++ return rc;
++
++ /* Soft Reset the SMI block */
++ rc = access_ereg_modify_changed(phydev, PHYACC_ATTR_BANK_SMI,
++ 0x00, 0x8000, 0x8000);
++ if (rc < 0)
++ return rc;
++
++ /* Check to see if the self-clearing bit is cleared */
++ usleep_range(1000, 2000);
++ rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
++ PHYACC_ATTR_BANK_SMI, 0x00, 0);
++ if (rc < 0)
++ return rc;
++ if ((rc & 0x8000) != 0)
++ return -ETIMEDOUT;
++
++ /* PHY Initialization */
++ for (i = 0; i < ARRAY_SIZE(init); i++) {
++ if (init[i].mode == PHYACC_ATTR_MODE_MODIFY) {
++ rc = access_ereg_modify_changed(phydev, init[i].bank,
++ init[i].offset,
++ init[i].val,
++ init[i].mask);
++ } else {
++ rc = access_ereg(phydev, init[i].mode, init[i].bank,
++ init[i].offset, init[i].val);
++ }
++ if (rc < 0)
++ return rc;
++ }
++
++ return 0;
++}
++
+ static int lan87xx_phy_config_intr(struct phy_device *phydev)
+ {
+ int rc, val = 0;
+@@ -40,6 +203,13 @@ static int lan87xx_phy_ack_interrupt(struct phy_device *phydev)
+ return rc < 0 ? rc : 0;
+ }
+
++static int lan87xx_config_init(struct phy_device *phydev)
++{
++ int rc = lan87xx_phy_init(phydev);
++
++ return rc < 0 ? rc : 0;
++}
++
+ static struct phy_driver microchip_t1_phy_driver[] = {
+ {
+ .phy_id = 0x0007c150,
+@@ -48,6 +218,7 @@ static struct phy_driver microchip_t1_phy_driver[] = {
+
+ .features = PHY_BASIC_T1_FEATURES,
+
++ .config_init = lan87xx_config_init,
+ .config_aneg = genphy_config_aneg,
+
+ .ack_interrupt = lan87xx_phy_ack_interrupt,
+--
+2.20.1
+
--- /dev/null
+From 714b8012c7e37c469f43b5fe4768da9ceab1cf8c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Apr 2020 08:18:03 +0200
+Subject: net/sonic: Fix a resource leak in an error handling path in
+ 'jazz_sonic_probe()'
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit 10e3cc180e64385edc9890c6855acf5ed9ca1339 ]
+
+A call to 'dma_alloc_coherent()' is hidden in 'sonic_alloc_descriptors()',
+called from 'sonic_probe1()'.
+
+This is correctly freed in the remove function, but not in the error
+handling path of the probe function.
+Fix it and add the missing 'dma_free_coherent()' call.
+
+While at it, rename a label in order to be slightly more informative.
+
+Fixes: efcce839360f ("[PATCH] macsonic/jazzsonic network drivers update")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/natsemi/jazzsonic.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
+index 51fa82b429a3c..40970352d2082 100644
+--- a/drivers/net/ethernet/natsemi/jazzsonic.c
++++ b/drivers/net/ethernet/natsemi/jazzsonic.c
+@@ -235,11 +235,13 @@ static int jazz_sonic_probe(struct platform_device *pdev)
+
+ err = register_netdev(dev);
+ if (err)
+- goto out1;
++ goto undo_probe1;
+
+ return 0;
+
+-out1:
++undo_probe1:
++ dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
++ lp->descriptors, lp->descriptors_laddr);
+ release_mem_region(dev->base_addr, SONIC_MEM_SIZE);
+ out:
+ free_netdev(dev);
+--
+2.20.1
+
--- /dev/null
+From 880b21a3a7a5fefa900b2393b48d80eb7a755a51 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 May 2020 15:10:16 +0100
+Subject: net: stmmac: gmac5+: fix potential integer overflow on 32 bit
+ multiply
+
+From: Colin Ian King <colin.king@canonical.com>
+
+[ Upstream commit 44d95cc6b10ff7439d45839c96c581cb4368c088 ]
+
+The multiplication of cfg->ctr[1] by 1000000000 is performed using a
+32 bit multiplication (since cfg->ctr[1] is a u32) and this can lead
+to a potential overflow. Fix this by making the constant a ULL to
+ensure a 64 bit multiply occurs.
+
+Fixes: 504723af0d85 ("net: stmmac: Add basic EST support for GMAC5+")
+Addresses-Coverity: ("Unintentional integer overflow")
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/dwmac5.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+index 494c859b4ade8..67ba67ed0cb99 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+@@ -624,7 +624,7 @@ int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
+ total_offset += offset;
+ }
+
+- total_ctr = cfg->ctr[0] + cfg->ctr[1] * 1000000000;
++ total_ctr = cfg->ctr[0] + cfg->ctr[1] * 1000000000ULL;
+ total_ctr += total_offset;
+
+ ctr_low = do_div(total_ctr, 1000000000);
+--
+2.20.1
+
--- /dev/null
+From cd42b05d90334729011d8d988a5cdf4362a239cb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Apr 2020 20:53:49 -0700
+Subject: net_sched: fix tcm_parent in tc filter dump
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit a7df4870d79b00742da6cc93ca2f336a71db77f7 ]
+
+When we tell kernel to dump filters from root (ffff:ffff),
+those filters on ingress (ffff:0000) are matched, but their
+true parents must be dumped as they are. However, kernel
+dumps just whatever we tell it, that is either ffff:ffff
+or ffff:0000:
+
+ $ nl-cls-list --dev=dummy0 --parent=root
+ cls basic dev dummy0 id none parent root prio 49152 protocol ip match-all
+ cls basic dev dummy0 id :1 parent root prio 49152 protocol ip match-all
+ $ nl-cls-list --dev=dummy0 --parent=ffff:
+ cls basic dev dummy0 id none parent ffff: prio 49152 protocol ip match-all
+ cls basic dev dummy0 id :1 parent ffff: prio 49152 protocol ip match-all
+
+This is confusing and misleading, more importantly this is
+a regression since 4.15, so the old behavior must be restored.
+
+And, when tc filters are installed on a tc class, the parent
+should be the classid, rather than the qdisc handle. Commit
+edf6711c9840 ("net: sched: remove classid and q fields from tcf_proto")
+removed the classid we save for filters, we can just restore
+this classid in tcf_block.
+
+Steps to reproduce this:
+ ip li set dev dummy0 up
+ tc qd add dev dummy0 ingress
+ tc filter add dev dummy0 parent ffff: protocol arp basic action pass
+ tc filter show dev dummy0 root
+
+Before this patch:
+ filter protocol arp pref 49152 basic
+ filter protocol arp pref 49152 basic handle 0x1
+ action order 1: gact action pass
+ random type none pass val 0
+ index 1 ref 1 bind 1
+
+After this patch:
+ filter parent ffff: protocol arp pref 49152 basic
+ filter parent ffff: protocol arp pref 49152 basic handle 0x1
+ action order 1: gact action pass
+ random type none pass val 0
+ index 1 ref 1 bind 1
+
+Fixes: a10fa20101ae ("net: sched: propagate q and parent from caller down to tcf_fill_node")
+Fixes: edf6711c9840 ("net: sched: remove classid and q fields from tcf_proto")
+Cc: Jamal Hadi Salim <jhs@mojatatu.com>
+Cc: Jiri Pirko <jiri@resnulli.us>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/sch_generic.h | 1 +
+ net/sched/cls_api.c | 8 ++++----
+ 2 files changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index c30f914867e64..f1f8acb14b674 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -407,6 +407,7 @@ struct tcf_block {
+ struct mutex lock;
+ struct list_head chain_list;
+ u32 index; /* block index for shared blocks */
++ u32 classid; /* which class this block belongs to */
+ refcount_t refcnt;
+ struct net *net;
+ struct Qdisc *q;
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index c2cdd0fc2e709..68c8fc6f535c7 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -2005,6 +2005,7 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
+ err = PTR_ERR(block);
+ goto errout;
+ }
++ block->classid = parent;
+
+ chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
+ if (chain_index > TC_ACT_EXT_VAL_MASK) {
+@@ -2547,12 +2548,10 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
+ return skb->len;
+
+ parent = tcm->tcm_parent;
+- if (!parent) {
++ if (!parent)
+ q = dev->qdisc;
+- parent = q->handle;
+- } else {
++ else
+ q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
+- }
+ if (!q)
+ goto out;
+ cops = q->ops->cl_ops;
+@@ -2568,6 +2567,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
+ block = cops->tcf_block(q, cl, NULL);
+ if (!block)
+ goto out;
++ parent = block->classid;
+ if (tcf_block_shared(block))
+ q = NULL;
+ }
+--
+2.20.1
+
--- /dev/null
+kvm-nvmx-consolidate-nested-mtf-checks-to-helper-fun.patch
+kvm-nvmx-reflect-mtf-vm-exits-if-injected-by-l1.patch
+xprtrdma-clean-up-the-post_send-path.patch
+xprtrdma-fix-trace-point-use-after-free-race.patch
+drm-i915-tgl-add-wa_14010477008-tgl.patch
+drm-i915-tgl-tbt-aux-should-use-tc-power-well-ops.patch
+drm-i915-display-load-dp_tp_ctl-status-offset-before.patch
+shmem-fix-possible-deadlocks-on-shmlock_user_lock.patch
+net-phy-microchip_t1-add-lan87xx_phy_init-to-initial.patch
+kvm-arm-vgic-synchronize-the-whole-guest-on-gic-d-r-.patch
+kvm-arm-vgic-v2-only-use-the-virtual-state-when-user.patch
+gpio-pca953x-fix-pca953x_gpio_set_config.patch
+sunrpc-add-len-parameter-to-gss_unwrap.patch
+sunrpc-fix-gss-privacy-computation-of-auth-au_ralign.patch
+net-sonic-fix-a-resource-leak-in-an-error-handling-p.patch
+net-moxa-fix-a-potential-double-free_irq.patch
+ftrace-selftests-workaround-cgroup-rt-scheduling-iss.patch
+hv_netvsc-fix-netvsc_start_xmit-s-return-type.patch
+net-make-ptp-specific-drivers-depend-on-ptp_1588_clo.patch
+drop_monitor-work-around-gcc-10-stringop-overflow-wa.patch
+virtio-blk-handle-block_device_operations-callbacks-.patch
+sun6i-dsi-fix-gcc-4.8.patch
+net_sched-fix-tcm_parent-in-tc-filter-dump.patch
+net-stmmac-gmac5-fix-potential-integer-overflow-on-3.patch
+iommu-amd-fix-race-in-increase_address_space-fetch_p.patch
+iommu-amd-update-device-table-in-increase_address_sp.patch
+net-dsa-ocelot-the-mac-table-on-felix-is-twice-as-la.patch
+net-mscc-ocelot-ana_autoage_age_period-holds-a-value.patch
--- /dev/null
+From 848d0337eca75031cf4419b4a5ca9bc53bda4588 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Apr 2020 18:14:14 -0700
+Subject: shmem: fix possible deadlocks on shmlock_user_lock
+
+From: Hugh Dickins <hughd@google.com>
+
+[ Upstream commit ea0dfeb4209b4eab954d6e00ed136bc6b48b380d ]
+
+Recent commit 71725ed10c40 ("mm: huge tmpfs: try to split_huge_page()
+when punching hole") has allowed syzkaller to probe deeper, uncovering a
+long-standing lockdep issue between the irq-unsafe shmlock_user_lock,
+the irq-safe xa_lock on mapping->i_pages, and shmem inode's info->lock
+which nests inside xa_lock (or tree_lock) since 4.8's shmem_uncharge().
+
+user_shm_lock(), servicing SysV shmctl(SHM_LOCK), wants
+shmlock_user_lock while its caller shmem_lock() holds info->lock with
+interrupts disabled; but hugetlbfs_file_setup() calls user_shm_lock()
+with interrupts enabled, and might be interrupted by a writeback endio
+wanting xa_lock on i_pages.
+
+This may not risk an actual deadlock, since shmem inodes do not take
+part in writeback accounting, but there are several easy ways to avoid
+it.
+
+Requiring interrupts disabled for shmlock_user_lock would be easy, but
+it's a high-level global lock for which that seems inappropriate.
+Instead, recall that the use of info->lock to guard info->flags in
+shmem_lock() dates from pre-3.1 days, when races with SHMEM_PAGEIN and
+SHMEM_TRUNCATE could occur: nowadays it serves no purpose, the only flag
+added or removed is VM_LOCKED itself, and calls to shmem_lock() on an inode
+are already serialized by the caller.
+
+Take info->lock out of the chain and the possibility of deadlock or
+lockdep warning goes away.
+
+Fixes: 4595ef88d136 ("shmem: make shmem_inode_info::lock irq-safe")
+Reported-by: syzbot+c8a8197c8852f566b9d9@syzkaller.appspotmail.com
+Reported-by: syzbot+40b71e145e73f78f81ad@syzkaller.appspotmail.com
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Acked-by: Yang Shi <yang.shi@linux.alibaba.com>
+Cc: Yang Shi <yang.shi@linux.alibaba.com>
+Link: http://lkml.kernel.org/r/alpine.LSU.2.11.2004161707410.16322@eggly.anvils
+Link: https://lore.kernel.org/lkml/000000000000e5838c05a3152f53@google.com/
+Link: https://lore.kernel.org/lkml/0000000000003712b305a331d3b1@google.com/
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/shmem.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 7406f91f8a528..153d889e32d1d 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -2184,7 +2184,11 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
+ struct shmem_inode_info *info = SHMEM_I(inode);
+ int retval = -ENOMEM;
+
+- spin_lock_irq(&info->lock);
++ /*
++ * What serializes the accesses to info->flags?
++ * ipc_lock_object() when called from shmctl_do_lock(),
++ * no serialization needed when called from shm_destroy().
++ */
+ if (lock && !(info->flags & VM_LOCKED)) {
+ if (!user_shm_lock(inode->i_size, user))
+ goto out_nomem;
+@@ -2199,7 +2203,6 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
+ retval = 0;
+
+ out_nomem:
+- spin_unlock_irq(&info->lock);
+ return retval;
+ }
+
+--
+2.20.1
+
--- /dev/null
+From 9df135a2618b166c1f27e50485b505e857298e9d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Apr 2020 23:50:51 +0200
+Subject: sun6i: dsi: fix gcc-4.8
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+[ Upstream commit 3a3a71f97c30983f1627c2c550d43566e9b634d2 ]
+
+Older compilers warn about initializers with incorrect curly
+braces:
+
+drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c: In function 'sun6i_dsi_encoder_enable':
+drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c:720:8: error: missing braces around initializer [-Werror=missing-braces]
+ union phy_configure_opts opts = { 0 };
+ ^
+drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c:720:8: error: (near initialization for 'opts.mipi_dphy') [-Werror=missing-braces]
+
+Use the GNU empty initializer extension to avoid this.
+
+Fixes: bb3b6fcb6849 ("sun6i: dsi: Convert to generic phy handling")
+Reviewed-by: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Maxime Ripard <maxime@cerno.tech>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200428215105.3928459-1-arnd@arndb.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+index a75fcb1131724..2b6d77ca3dfc2 100644
+--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
++++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+@@ -719,7 +719,7 @@ static void sun6i_dsi_encoder_enable(struct drm_encoder *encoder)
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
+ struct sun6i_dsi *dsi = encoder_to_sun6i_dsi(encoder);
+ struct mipi_dsi_device *device = dsi->device;
+- union phy_configure_opts opts = { 0 };
++ union phy_configure_opts opts = { };
+ struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy;
+ u16 delay;
+
+--
+2.20.1
+
--- /dev/null
+From 39a30ffe5d05424859e30b40f12cd1f35d0e459f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 18 Apr 2020 21:06:23 -0400
+Subject: SUNRPC: Add "@len" parameter to gss_unwrap()
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+[ Upstream commit 31c9590ae468478fe47dc0f5f0d3562b2f69450e ]
+
+Refactor: This is a pre-requisite to fixing the client-side ralign
+computation in gss_unwrap_resp_priv().
+
+The length value is passed in explicitly rather than as the value
+of buf->len. This will subsequently allow gss_unwrap_kerberos_v1()
+to compute a slack and align value, instead of computing it in
+gss_unwrap_resp_priv().
+
+Fixes: 35e77d21baa0 ("SUNRPC: Add rpc_auth::au_ralign field")
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/sunrpc/gss_api.h | 2 ++
+ include/linux/sunrpc/gss_krb5.h | 6 +++---
+ net/sunrpc/auth_gss/auth_gss.c | 4 ++--
+ net/sunrpc/auth_gss/gss_krb5_crypto.c | 8 ++++----
+ net/sunrpc/auth_gss/gss_krb5_wrap.c | 26 +++++++++++++++-----------
+ net/sunrpc/auth_gss/gss_mech_switch.c | 3 ++-
+ net/sunrpc/auth_gss/svcauth_gss.c | 8 ++------
+ 7 files changed, 30 insertions(+), 27 deletions(-)
+
+diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h
+index 48c1b1674cbf0..e9a79518d6527 100644
+--- a/include/linux/sunrpc/gss_api.h
++++ b/include/linux/sunrpc/gss_api.h
+@@ -66,6 +66,7 @@ u32 gss_wrap(
+ u32 gss_unwrap(
+ struct gss_ctx *ctx_id,
+ int offset,
++ int len,
+ struct xdr_buf *inbuf);
+ u32 gss_delete_sec_context(
+ struct gss_ctx **ctx_id);
+@@ -126,6 +127,7 @@ struct gss_api_ops {
+ u32 (*gss_unwrap)(
+ struct gss_ctx *ctx_id,
+ int offset,
++ int len,
+ struct xdr_buf *buf);
+ void (*gss_delete_sec_context)(
+ void *internal_ctx_id);
+diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h
+index c1d77dd8ed416..e8f8ffe7448b2 100644
+--- a/include/linux/sunrpc/gss_krb5.h
++++ b/include/linux/sunrpc/gss_krb5.h
+@@ -83,7 +83,7 @@ struct gss_krb5_enctype {
+ u32 (*encrypt_v2) (struct krb5_ctx *kctx, u32 offset,
+ struct xdr_buf *buf,
+ struct page **pages); /* v2 encryption function */
+- u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset,
++ u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset, u32 len,
+ struct xdr_buf *buf, u32 *headskip,
+ u32 *tailskip); /* v2 decryption function */
+ };
+@@ -255,7 +255,7 @@ gss_wrap_kerberos(struct gss_ctx *ctx_id, int offset,
+ struct xdr_buf *outbuf, struct page **pages);
+
+ u32
+-gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset,
++gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset, int len,
+ struct xdr_buf *buf);
+
+
+@@ -312,7 +312,7 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
+ struct page **pages);
+
+ u32
+-gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset,
++gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
+ struct xdr_buf *buf, u32 *plainoffset,
+ u32 *plainlen);
+
+diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
+index 2dc740acb3bf3..a08a733f2d7c2 100644
+--- a/net/sunrpc/auth_gss/auth_gss.c
++++ b/net/sunrpc/auth_gss/auth_gss.c
+@@ -2041,9 +2041,9 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
+ offset = (u8 *)(p) - (u8 *)head->iov_base;
+ if (offset + opaque_len > rcv_buf->len)
+ goto unwrap_failed;
+- rcv_buf->len = offset + opaque_len;
+
+- maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf);
++ maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset,
++ offset + opaque_len, rcv_buf);
+ if (maj_stat == GSS_S_CONTEXT_EXPIRED)
+ clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
+ if (maj_stat != GSS_S_COMPLETE)
+diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
+index 6f2d30d7b766d..e7180da1fc6a1 100644
+--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
++++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
+@@ -851,8 +851,8 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
+ }
+
+ u32
+-gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
+- u32 *headskip, u32 *tailskip)
++gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
++ struct xdr_buf *buf, u32 *headskip, u32 *tailskip)
+ {
+ struct xdr_buf subbuf;
+ u32 ret = 0;
+@@ -881,7 +881,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
+
+ /* create a segment skipping the header and leaving out the checksum */
+ xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
+- (buf->len - offset - GSS_KRB5_TOK_HDR_LEN -
++ (len - offset - GSS_KRB5_TOK_HDR_LEN -
+ kctx->gk5e->cksumlength));
+
+ nblocks = (subbuf.len + blocksize - 1) / blocksize;
+@@ -926,7 +926,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
+ goto out_err;
+
+ /* Get the packet's hmac value */
+- ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength,
++ ret = read_bytes_from_xdr_buf(buf, len - kctx->gk5e->cksumlength,
+ pkt_hmac, kctx->gk5e->cksumlength);
+ if (ret)
+ goto out_err;
+diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
+index 6c1920eed7717..c7589e35d5d92 100644
+--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
++++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
+@@ -261,7 +261,8 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
+ }
+
+ static u32
+-gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
++gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, int len,
++ struct xdr_buf *buf)
+ {
+ int signalg;
+ int sealalg;
+@@ -284,7 +285,7 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
+
+ ptr = (u8 *)buf->head[0].iov_base + offset;
+ if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
+- buf->len - offset))
++ len - offset))
+ return GSS_S_DEFECTIVE_TOKEN;
+
+ if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
+@@ -324,6 +325,7 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
+ (!kctx->initiate && direction != 0))
+ return GSS_S_BAD_SIG;
+
++ buf->len = len;
+ if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
+ struct crypto_sync_skcipher *cipher;
+ int err;
+@@ -376,7 +378,7 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
+ data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
+ memmove(orig_start, data_start, data_len);
+ buf->head[0].iov_len -= (data_start - orig_start);
+- buf->len -= (data_start - orig_start);
++ buf->len = len - (data_start - orig_start);
+
+ if (gss_krb5_remove_padding(buf, blocksize))
+ return GSS_S_DEFECTIVE_TOKEN;
+@@ -486,7 +488,8 @@ gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
+ }
+
+ static u32
+-gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
++gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, int len,
++ struct xdr_buf *buf)
+ {
+ time64_t now;
+ u8 *ptr;
+@@ -532,7 +535,7 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
+ if (rrc != 0)
+ rotate_left(offset + 16, buf, rrc);
+
+- err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf,
++ err = (*kctx->gk5e->decrypt_v2)(kctx, offset, len, buf,
+ &headskip, &tailskip);
+ if (err)
+ return GSS_S_FAILURE;
+@@ -542,7 +545,7 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
+ * it against the original
+ */
+ err = read_bytes_from_xdr_buf(buf,
+- buf->len - GSS_KRB5_TOK_HDR_LEN - tailskip,
++ len - GSS_KRB5_TOK_HDR_LEN - tailskip,
+ decrypted_hdr, GSS_KRB5_TOK_HDR_LEN);
+ if (err) {
+ dprintk("%s: error %u getting decrypted_hdr\n", __func__, err);
+@@ -568,14 +571,14 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
+ * Note that buf->head[0].iov_len may indicate the available
+ * head buffer space rather than that actually occupied.
+ */
+- movelen = min_t(unsigned int, buf->head[0].iov_len, buf->len);
++ movelen = min_t(unsigned int, buf->head[0].iov_len, len);
+ movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip;
+ if (offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
+ buf->head[0].iov_len)
+ return GSS_S_FAILURE;
+ memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
+ buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
+- buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip;
++ buf->len = len - GSS_KRB5_TOK_HDR_LEN + headskip;
+
+ /* Trim off the trailing "extra count" and checksum blob */
+ buf->len -= ec + GSS_KRB5_TOK_HDR_LEN + tailskip;
+@@ -603,7 +606,8 @@ gss_wrap_kerberos(struct gss_ctx *gctx, int offset,
+ }
+
+ u32
+-gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
++gss_unwrap_kerberos(struct gss_ctx *gctx, int offset,
++ int len, struct xdr_buf *buf)
+ {
+ struct krb5_ctx *kctx = gctx->internal_ctx_id;
+
+@@ -613,9 +617,9 @@ gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
+ case ENCTYPE_DES_CBC_RAW:
+ case ENCTYPE_DES3_CBC_RAW:
+ case ENCTYPE_ARCFOUR_HMAC:
+- return gss_unwrap_kerberos_v1(kctx, offset, buf);
++ return gss_unwrap_kerberos_v1(kctx, offset, len, buf);
+ case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
+ case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
+- return gss_unwrap_kerberos_v2(kctx, offset, buf);
++ return gss_unwrap_kerberos_v2(kctx, offset, len, buf);
+ }
+ }
+diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
+index db550bfc2642e..69316ab1b9fac 100644
+--- a/net/sunrpc/auth_gss/gss_mech_switch.c
++++ b/net/sunrpc/auth_gss/gss_mech_switch.c
+@@ -411,10 +411,11 @@ gss_wrap(struct gss_ctx *ctx_id,
+ u32
+ gss_unwrap(struct gss_ctx *ctx_id,
+ int offset,
++ int len,
+ struct xdr_buf *buf)
+ {
+ return ctx_id->mech_type->gm_ops
+- ->gss_unwrap(ctx_id, offset, buf);
++ ->gss_unwrap(ctx_id, offset, len, buf);
+ }
+
+
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index 65b67b2573021..559053646e12c 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -928,7 +928,7 @@ static int
+ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
+ {
+ u32 priv_len, maj_stat;
+- int pad, saved_len, remaining_len, offset;
++ int pad, remaining_len, offset;
+
+ clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
+
+@@ -948,12 +948,8 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs
+ buf->len -= pad;
+ fix_priv_head(buf, pad);
+
+- /* Maybe it would be better to give gss_unwrap a length parameter: */
+- saved_len = buf->len;
+- buf->len = priv_len;
+- maj_stat = gss_unwrap(ctx, 0, buf);
++ maj_stat = gss_unwrap(ctx, 0, priv_len, buf);
+ pad = priv_len - buf->len;
+- buf->len = saved_len;
+ buf->len -= pad;
+ /* The upper layers assume the buffer is aligned on 4-byte boundaries.
+ * In the krb5p case, at least, the data ends up offset, so we need to
+--
+2.20.1
+
--- /dev/null
+From 7f25a29de97a3462ca867fa6918ba67220e9839f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 18 Apr 2020 14:38:19 -0400
+Subject: SUNRPC: Fix GSS privacy computation of auth->au_ralign
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+[ Upstream commit a7e429a6fa6d612d1dacde96c885dc1bb4a9f400 ]
+
+When the au_ralign field was added to gss_unwrap_resp_priv, the
+wrong calculation was used. Setting au_rslack == au_ralign is
+probably correct for kerberos_v1 privacy, but kerberos_v2 privacy
+adds additional GSS data after the clear text RPC message.
+au_ralign needs to be smaller than au_rslack in that fairly common
+case.
+
+When xdr_buf_trim() is restored to gss_unwrap_kerberos_v2(), it does
+exactly what I feared it would: it trims off part of the clear text
+RPC message. However, that's because rpc_prepare_reply_pages() does
+not set up the rq_rcv_buf's tail correctly because au_ralign is too
+large.
+
+Fixing the au_ralign computation also corrects the alignment of
+rq_rcv_buf->pages so that the client does not have to shift reply
+data payloads after they are received.
+
+Fixes: 35e77d21baa0 ("SUNRPC: Add rpc_auth::au_ralign field")
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/sunrpc/gss_api.h | 1 +
+ net/sunrpc/auth_gss/auth_gss.c | 8 +++-----
+ net/sunrpc/auth_gss/gss_krb5_wrap.c | 19 +++++++++++++++----
+ 3 files changed, 19 insertions(+), 9 deletions(-)
+
+diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h
+index e9a79518d6527..bc07e51f20d1c 100644
+--- a/include/linux/sunrpc/gss_api.h
++++ b/include/linux/sunrpc/gss_api.h
+@@ -21,6 +21,7 @@
+ struct gss_ctx {
+ struct gss_api_mech *mech_type;
+ void *internal_ctx_id;
++ unsigned int slack, align;
+ };
+
+ #define GSS_C_NO_BUFFER ((struct xdr_netobj) 0)
+diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
+index a08a733f2d7c2..a7ad150fd4ee9 100644
+--- a/net/sunrpc/auth_gss/auth_gss.c
++++ b/net/sunrpc/auth_gss/auth_gss.c
+@@ -2030,7 +2030,6 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
+ struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
+ struct kvec *head = rqstp->rq_rcv_buf.head;
+ struct rpc_auth *auth = cred->cr_auth;
+- unsigned int savedlen = rcv_buf->len;
+ u32 offset, opaque_len, maj_stat;
+ __be32 *p;
+
+@@ -2057,10 +2056,9 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
+ */
+ xdr_init_decode(xdr, rcv_buf, p, rqstp);
+
+- auth->au_rslack = auth->au_verfsize + 2 +
+- XDR_QUADLEN(savedlen - rcv_buf->len);
+- auth->au_ralign = auth->au_verfsize + 2 +
+- XDR_QUADLEN(savedlen - rcv_buf->len);
++ auth->au_rslack = auth->au_verfsize + 2 + ctx->gc_gss_ctx->slack;
++ auth->au_ralign = auth->au_verfsize + 2 + ctx->gc_gss_ctx->align;
++
+ return 0;
+ unwrap_failed:
+ trace_rpcgss_unwrap_failed(task);
+diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
+index c7589e35d5d92..4905652e75679 100644
+--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
++++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
+@@ -262,7 +262,8 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
+
+ static u32
+ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, int len,
+- struct xdr_buf *buf)
++ struct xdr_buf *buf, unsigned int *slack,
++ unsigned int *align)
+ {
+ int signalg;
+ int sealalg;
+@@ -280,6 +281,7 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, int len,
+ u32 conflen = kctx->gk5e->conflen;
+ int crypt_offset;
+ u8 *cksumkey;
++ unsigned int saved_len = buf->len;
+
+ dprintk("RPC: gss_unwrap_kerberos\n");
+
+@@ -383,6 +385,10 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, int len,
+ if (gss_krb5_remove_padding(buf, blocksize))
+ return GSS_S_DEFECTIVE_TOKEN;
+
++ /* slack must include room for krb5 padding */
++ *slack = XDR_QUADLEN(saved_len - buf->len);
++ /* The GSS blob always precedes the RPC message payload */
++ *align = *slack;
+ return GSS_S_COMPLETE;
+ }
+
+@@ -489,7 +495,8 @@ gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
+
+ static u32
+ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, int len,
+- struct xdr_buf *buf)
++ struct xdr_buf *buf, unsigned int *slack,
++ unsigned int *align)
+ {
+ time64_t now;
+ u8 *ptr;
+@@ -583,6 +590,8 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, int len,
+ /* Trim off the trailing "extra count" and checksum blob */
+ buf->len -= ec + GSS_KRB5_TOK_HDR_LEN + tailskip;
+
++ *align = XDR_QUADLEN(GSS_KRB5_TOK_HDR_LEN + headskip);
++ *slack = *align + XDR_QUADLEN(ec + GSS_KRB5_TOK_HDR_LEN + tailskip);
+ return GSS_S_COMPLETE;
+ }
+
+@@ -617,9 +626,11 @@ gss_unwrap_kerberos(struct gss_ctx *gctx, int offset,
+ case ENCTYPE_DES_CBC_RAW:
+ case ENCTYPE_DES3_CBC_RAW:
+ case ENCTYPE_ARCFOUR_HMAC:
+- return gss_unwrap_kerberos_v1(kctx, offset, len, buf);
++ return gss_unwrap_kerberos_v1(kctx, offset, len, buf,
++ &gctx->slack, &gctx->align);
+ case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
+ case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
+- return gss_unwrap_kerberos_v2(kctx, offset, len, buf);
++ return gss_unwrap_kerberos_v2(kctx, offset, len, buf,
++ &gctx->slack, &gctx->align);
+ }
+ }
+--
+2.20.1
+
--- /dev/null
+From 9c8699713432310dbedf73a345f8e0c8fe250f68 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Apr 2020 15:04:42 +0100
+Subject: virtio-blk: handle block_device_operations callbacks after hot unplug
+
+From: Stefan Hajnoczi <stefanha@redhat.com>
+
+[ Upstream commit 90b5feb8c4bebc76c27fcaf3e1a0e5ca2d319e9e ]
+
+A userspace process holding a file descriptor to a virtio_blk device can
+still invoke block_device_operations after hot unplug. This leads to a
+use-after-free accessing vblk->vdev in virtblk_getgeo() when
+ioctl(HDIO_GETGEO) is invoked:
+
+ BUG: unable to handle kernel NULL pointer dereference at 0000000000000090
+ IP: [<ffffffffc00e5450>] virtio_check_driver_offered_feature+0x10/0x90 [virtio]
+ PGD 800000003a92f067 PUD 3a930067 PMD 0
+ Oops: 0000 [#1] SMP
+ CPU: 0 PID: 1310 Comm: hdio-getgeo Tainted: G OE ------------ 3.10.0-1062.el7.x86_64 #1
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.13.0-0-gf21b5a4aeb02-prebuilt.qemu.org 04/01/2014
+ task: ffff9be5fbfb8000 ti: ffff9be5fa890000 task.ti: ffff9be5fa890000
+ RIP: 0010:[<ffffffffc00e5450>] [<ffffffffc00e5450>] virtio_check_driver_offered_feature+0x10/0x90 [virtio]
+ RSP: 0018:ffff9be5fa893dc8 EFLAGS: 00010246
+ RAX: ffff9be5fc3f3400 RBX: ffff9be5fa893e30 RCX: 0000000000000000
+ RDX: 0000000000000000 RSI: 0000000000000004 RDI: ffff9be5fbc10b40
+ RBP: ffff9be5fa893dc8 R08: 0000000000000301 R09: 0000000000000301
+ R10: 0000000000000000 R11: 0000000000000000 R12: ffff9be5fdc24680
+ R13: ffff9be5fbc10b40 R14: ffff9be5fbc10480 R15: 0000000000000000
+ FS: 00007f1bfb968740(0000) GS:ffff9be5ffc00000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 0000000000000090 CR3: 000000003a894000 CR4: 0000000000360ff0
+ DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+ DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+ Call Trace:
+ [<ffffffffc016ac37>] virtblk_getgeo+0x47/0x110 [virtio_blk]
+ [<ffffffff8d3f200d>] ? handle_mm_fault+0x39d/0x9b0
+ [<ffffffff8d561265>] blkdev_ioctl+0x1f5/0xa20
+ [<ffffffff8d488771>] block_ioctl+0x41/0x50
+ [<ffffffff8d45d9e0>] do_vfs_ioctl+0x3a0/0x5a0
+ [<ffffffff8d45dc81>] SyS_ioctl+0xa1/0xc0
+
+A related problem is that virtblk_remove() leaks the vd_index_ida index
+when something still holds a reference to vblk->disk during hot unplug.
+This causes virtio-blk device names to be lost (vda, vdb, etc).
+
+Fix these issues by protecting vblk->vdev with a mutex and reference
+counting vblk so the vd_index_ida index can be removed in all cases.
+
+Fixes: 48e4043d4529 ("virtio: add virtio disk geometry feature")
+Reported-by: Lance Digby <ldigby@redhat.com>
+Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
+Link: https://lore.kernel.org/r/20200430140442.171016-1-stefanha@redhat.com
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/virtio_blk.c | 86 ++++++++++++++++++++++++++++++++++----
+ 1 file changed, 78 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index 0736248999b0d..d52f33881ab6e 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -32,6 +32,15 @@ struct virtio_blk_vq {
+ } ____cacheline_aligned_in_smp;
+
+ struct virtio_blk {
++ /*
++ * This mutex must be held by anything that may run after
++ * virtblk_remove() sets vblk->vdev to NULL.
++ *
++ * blk-mq, virtqueue processing, and sysfs attribute code paths are
++ * shut down before vblk->vdev is set to NULL and therefore do not need
++ * to hold this mutex.
++ */
++ struct mutex vdev_mutex;
+ struct virtio_device *vdev;
+
+ /* The disk structure for the kernel. */
+@@ -43,6 +52,13 @@ struct virtio_blk {
+ /* Process context for config space updates */
+ struct work_struct config_work;
+
++ /*
++ * Tracks references from block_device_operations open/release and
++ * virtio_driver probe/remove so this object can be freed once no
++ * longer in use.
++ */
++ refcount_t refs;
++
+ /* What host tells us, plus 2 for header & tailer. */
+ unsigned int sg_elems;
+
+@@ -294,10 +310,55 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
+ return err;
+ }
+
++static void virtblk_get(struct virtio_blk *vblk)
++{
++ refcount_inc(&vblk->refs);
++}
++
++static void virtblk_put(struct virtio_blk *vblk)
++{
++ if (refcount_dec_and_test(&vblk->refs)) {
++ ida_simple_remove(&vd_index_ida, vblk->index);
++ mutex_destroy(&vblk->vdev_mutex);
++ kfree(vblk);
++ }
++}
++
++static int virtblk_open(struct block_device *bd, fmode_t mode)
++{
++ struct virtio_blk *vblk = bd->bd_disk->private_data;
++ int ret = 0;
++
++ mutex_lock(&vblk->vdev_mutex);
++
++ if (vblk->vdev)
++ virtblk_get(vblk);
++ else
++ ret = -ENXIO;
++
++ mutex_unlock(&vblk->vdev_mutex);
++ return ret;
++}
++
++static void virtblk_release(struct gendisk *disk, fmode_t mode)
++{
++ struct virtio_blk *vblk = disk->private_data;
++
++ virtblk_put(vblk);
++}
++
+ /* We provide getgeo only to please some old bootloader/partitioning tools */
+ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
+ {
+ struct virtio_blk *vblk = bd->bd_disk->private_data;
++ int ret = 0;
++
++ mutex_lock(&vblk->vdev_mutex);
++
++ if (!vblk->vdev) {
++ ret = -ENXIO;
++ goto out;
++ }
+
+ /* see if the host passed in geometry config */
+ if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
+@@ -313,11 +374,15 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
+ geo->sectors = 1 << 5;
+ geo->cylinders = get_capacity(bd->bd_disk) >> 11;
+ }
+- return 0;
++out:
++ mutex_unlock(&vblk->vdev_mutex);
++ return ret;
+ }
+
+ static const struct block_device_operations virtblk_fops = {
+ .owner = THIS_MODULE,
++ .open = virtblk_open,
++ .release = virtblk_release,
+ .getgeo = virtblk_getgeo,
+ };
+
+@@ -657,6 +722,10 @@ static int virtblk_probe(struct virtio_device *vdev)
+ goto out_free_index;
+ }
+
++ /* This reference is dropped in virtblk_remove(). */
++ refcount_set(&vblk->refs, 1);
++ mutex_init(&vblk->vdev_mutex);
++
+ vblk->vdev = vdev;
+ vblk->sg_elems = sg_elems;
+
+@@ -822,8 +891,6 @@ static int virtblk_probe(struct virtio_device *vdev)
+ static void virtblk_remove(struct virtio_device *vdev)
+ {
+ struct virtio_blk *vblk = vdev->priv;
+- int index = vblk->index;
+- int refc;
+
+ /* Make sure no work handler is accessing the device. */
+ flush_work(&vblk->config_work);
+@@ -833,18 +900,21 @@ static void virtblk_remove(struct virtio_device *vdev)
+
+ blk_mq_free_tag_set(&vblk->tag_set);
+
++ mutex_lock(&vblk->vdev_mutex);
++
+ /* Stop all the virtqueues. */
+ vdev->config->reset(vdev);
+
+- refc = kref_read(&disk_to_dev(vblk->disk)->kobj.kref);
++ /* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
++ vblk->vdev = NULL;
++
+ put_disk(vblk->disk);
+ vdev->config->del_vqs(vdev);
+ kfree(vblk->vqs);
+- kfree(vblk);
+
+- /* Only free device id if we don't have any users */
+- if (refc == 1)
+- ida_simple_remove(&vd_index_ida, index);
++ mutex_unlock(&vblk->vdev_mutex);
++
++ virtblk_put(vblk);
+ }
+
+ #ifdef CONFIG_PM_SLEEP
+--
+2.20.1
+
--- /dev/null
+From 9ff0a6f5ed26dd765b263f1c88da0de04446df72 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Feb 2020 17:00:23 -0500
+Subject: xprtrdma: Clean up the post_send path
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+[ Upstream commit 97d0de8812a10a66510ff95f8fe6e8d3053fd2ca ]
+
+Clean up: Simplify the synopses of functions in the post_send path
+by combining the struct rpcrdma_ia and struct rpcrdma_ep arguments.
+
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sunrpc/xprtrdma/backchannel.c | 2 +-
+ net/sunrpc/xprtrdma/frwr_ops.c | 14 +++++++++-----
+ net/sunrpc/xprtrdma/transport.c | 2 +-
+ net/sunrpc/xprtrdma/verbs.c | 13 +++++--------
+ net/sunrpc/xprtrdma/xprt_rdma.h | 5 ++---
+ 5 files changed, 18 insertions(+), 18 deletions(-)
+
+diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
+index 1a0ae0c61353c..4b43910a6ed21 100644
+--- a/net/sunrpc/xprtrdma/backchannel.c
++++ b/net/sunrpc/xprtrdma/backchannel.c
+@@ -115,7 +115,7 @@ int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst)
+ if (rc < 0)
+ goto failed_marshal;
+
+- if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
++ if (rpcrdma_post_sends(r_xprt, req))
+ goto drop_connection;
+ return 0;
+
+diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
+index 125297c9aa3e7..79059d48f52b7 100644
+--- a/net/sunrpc/xprtrdma/frwr_ops.c
++++ b/net/sunrpc/xprtrdma/frwr_ops.c
+@@ -372,18 +372,22 @@ static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
+ }
+
+ /**
+- * frwr_send - post Send WR containing the RPC Call message
+- * @ia: interface adapter
+- * @req: Prepared RPC Call
++ * frwr_send - post Send WRs containing the RPC Call message
++ * @r_xprt: controlling transport instance
++ * @req: prepared RPC Call
+ *
+ * For FRWR, chain any FastReg WRs to the Send WR. Only a
+ * single ib_post_send call is needed to register memory
+ * and then post the Send WR.
+ *
+- * Returns the result of ib_post_send.
++ * Returns the return code from ib_post_send.
++ *
++ * Caller must hold the transport send lock to ensure that the
++ * pointers to the transport's rdma_cm_id and QP are stable.
+ */
+-int frwr_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
++int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
+ {
++ struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+ struct ib_send_wr *post_wr;
+ struct rpcrdma_mr *mr;
+
+diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
+index 3cfeba68ee9a1..46e7949788e1a 100644
+--- a/net/sunrpc/xprtrdma/transport.c
++++ b/net/sunrpc/xprtrdma/transport.c
+@@ -694,7 +694,7 @@ xprt_rdma_send_request(struct rpc_rqst *rqst)
+ goto drop_connection;
+ rqst->rq_xtime = ktime_get();
+
+- if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
++ if (rpcrdma_post_sends(r_xprt, req))
+ goto drop_connection;
+
+ rqst->rq_xmit_bytes_sent += rqst->rq_snd_buf.len;
+diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
+index 353f61ac8d519..4b9fbf69b4955 100644
+--- a/net/sunrpc/xprtrdma/verbs.c
++++ b/net/sunrpc/xprtrdma/verbs.c
+@@ -1502,20 +1502,17 @@ static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb)
+ }
+
+ /**
+- * rpcrdma_ep_post - Post WRs to a transport's Send Queue
+- * @ia: transport's device information
+- * @ep: transport's RDMA endpoint information
++ * rpcrdma_post_sends - Post WRs to a transport's Send Queue
++ * @r_xprt: controlling transport instance
+ * @req: rpcrdma_req containing the Send WR to post
+ *
+ * Returns 0 if the post was successful, otherwise -ENOTCONN
+ * is returned.
+ */
+-int
+-rpcrdma_ep_post(struct rpcrdma_ia *ia,
+- struct rpcrdma_ep *ep,
+- struct rpcrdma_req *req)
++int rpcrdma_post_sends(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
+ {
+ struct ib_send_wr *send_wr = &req->rl_wr;
++ struct rpcrdma_ep *ep = &r_xprt->rx_ep;
+ int rc;
+
+ if (!ep->rep_send_count || kref_read(&req->rl_kref) > 1) {
+@@ -1526,7 +1523,7 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
+ --ep->rep_send_count;
+ }
+
+- rc = frwr_send(ia, req);
++ rc = frwr_send(r_xprt, req);
+ trace_xprtrdma_post_send(req, rc);
+ if (rc)
+ return -ENOTCONN;
+diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
+index 37d5080c250b8..600574a0d8387 100644
+--- a/net/sunrpc/xprtrdma/xprt_rdma.h
++++ b/net/sunrpc/xprtrdma/xprt_rdma.h
+@@ -469,8 +469,7 @@ void rpcrdma_ep_destroy(struct rpcrdma_xprt *r_xprt);
+ int rpcrdma_ep_connect(struct rpcrdma_ep *, struct rpcrdma_ia *);
+ void rpcrdma_ep_disconnect(struct rpcrdma_ep *, struct rpcrdma_ia *);
+
+-int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *,
+- struct rpcrdma_req *);
++int rpcrdma_post_sends(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
+ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp);
+
+ /*
+@@ -544,7 +543,7 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
+ struct rpcrdma_mr_seg *seg,
+ int nsegs, bool writing, __be32 xid,
+ struct rpcrdma_mr *mr);
+-int frwr_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req);
++int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
+ void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs);
+ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
+ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
+--
+2.20.1
+
--- /dev/null
+From cb2f293cf3b8c5fc792c101f65ede5c9f97edda4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 19 Apr 2020 20:03:05 -0400
+Subject: xprtrdma: Fix trace point use-after-free race
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+[ Upstream commit bdb2ce82818577ba6e57b7d68b698b8d17329281 ]
+
+It's not safe to use resources pointed to by the @send_wr of
+ib_post_send() _after_ that function returns. Those resources are
+typically freed by the Send completion handler, which can run before
+ib_post_send() returns.
+
+Thus the trace points currently around ib_post_send() in the
+client's RPC/RDMA transport are a hazard, even when they are
+disabled. Rearrange them so that they touch the Work Request only
+_before_ ib_post_send() is invoked.
+
+Fixes: ab03eff58eb5 ("xprtrdma: Add trace points in RPC Call transmit paths")
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/trace/events/rpcrdma.h | 12 ++++--------
+ net/sunrpc/xprtrdma/verbs.c | 2 +-
+ 2 files changed, 5 insertions(+), 9 deletions(-)
+
+diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
+index fa14adf242353..43158151821c4 100644
+--- a/include/trace/events/rpcrdma.h
++++ b/include/trace/events/rpcrdma.h
+@@ -721,11 +721,10 @@ TRACE_EVENT(xprtrdma_prepsend_failed,
+
+ TRACE_EVENT(xprtrdma_post_send,
+ TP_PROTO(
+- const struct rpcrdma_req *req,
+- int status
++ const struct rpcrdma_req *req
+ ),
+
+- TP_ARGS(req, status),
++ TP_ARGS(req),
+
+ TP_STRUCT__entry(
+ __field(const void *, req)
+@@ -734,7 +733,6 @@ TRACE_EVENT(xprtrdma_post_send,
+ __field(unsigned int, client_id)
+ __field(int, num_sge)
+ __field(int, signaled)
+- __field(int, status)
+ ),
+
+ TP_fast_assign(
+@@ -747,15 +745,13 @@ TRACE_EVENT(xprtrdma_post_send,
+ __entry->sc = req->rl_sendctx;
+ __entry->num_sge = req->rl_wr.num_sge;
+ __entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
+- __entry->status = status;
+ ),
+
+- TP_printk("task:%u@%u req=%p sc=%p (%d SGE%s) %sstatus=%d",
++ TP_printk("task:%u@%u req=%p sc=%p (%d SGE%s) %s",
+ __entry->task_id, __entry->client_id,
+ __entry->req, __entry->sc, __entry->num_sge,
+ (__entry->num_sge == 1 ? "" : "s"),
+- (__entry->signaled ? "signaled " : ""),
+- __entry->status
++ (__entry->signaled ? "signaled" : "")
+ )
+ );
+
+diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
+index 4b9fbf69b4955..a48b99f3682c3 100644
+--- a/net/sunrpc/xprtrdma/verbs.c
++++ b/net/sunrpc/xprtrdma/verbs.c
+@@ -1523,8 +1523,8 @@ int rpcrdma_post_sends(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
+ --ep->rep_send_count;
+ }
+
++ trace_xprtrdma_post_send(req);
+ rc = frwr_send(r_xprt, req);
+- trace_xprtrdma_post_send(req, rc);
+ if (rc)
+ return -ENOTCONN;
+ return 0;
+--
+2.20.1
+