--- /dev/null
+From ccff7bd468d5e0595176656a051ef67c01f01968 Mon Sep 17 00:00:00 2001
+From: Hui Wang <hui.wang@canonical.com>
+Date: Thu, 30 Jul 2020 20:31:38 +0800
+Subject: ASoC: amd: renoir: restore two more registers during resume
+
+From: Hui Wang <hui.wang@canonical.com>
+
+commit ccff7bd468d5e0595176656a051ef67c01f01968 upstream.
+
+Recently we found an issue about the suspend and resume. If dmic is
+recording the sound, and we run suspend and resume, after the resume,
+the dmic can't work well anymore. We need to close the app and reopen
+the app, then the dmic could record the sound again.
+
+For example, we run "arecord -D hw:CARD=acp,DEV=0 -f S32_LE -c 2
+-r 48000 test.wav", then suspend and resume, after the system resume
+back, we speak to the dmic. Then stop the arecord, use aplay to play
+the test.wav, we could hear the sound recorded after resume is weird,
+it is not what we speak to the dmic.
+
+I found two registers are set in the dai_hw_params(), if the two
+registers are set during the resume, this issue could be fixed.
+Move the code of the dai_hw_params() into the pdm_dai_trigger(), then
+these two registers will be set during resume since pdm_dai_trigger()
+will be called during resume. And delete the empty function
+dai_hw_params().
+
+Signed-off-by: Hui Wang <hui.wang@canonical.com>
+Reviewed-by: Vijendar Mukunda <Vijendar.Mukunda@amd.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20200730123138.5659-1-hui.wang@canonical.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/soc/amd/renoir/acp3x-pdm-dma.c | 29 +++++++++--------------------
+ 1 file changed, 9 insertions(+), 20 deletions(-)
+
+--- a/sound/soc/amd/renoir/acp3x-pdm-dma.c
++++ b/sound/soc/amd/renoir/acp3x-pdm-dma.c
+@@ -314,40 +314,30 @@ static int acp_pdm_dma_close(struct snd_
+ return 0;
+ }
+
+-static int acp_pdm_dai_hw_params(struct snd_pcm_substream *substream,
+- struct snd_pcm_hw_params *params,
+- struct snd_soc_dai *dai)
++static int acp_pdm_dai_trigger(struct snd_pcm_substream *substream,
++ int cmd, struct snd_soc_dai *dai)
+ {
+ struct pdm_stream_instance *rtd;
++ int ret;
++ bool pdm_status;
+ unsigned int ch_mask;
+
+ rtd = substream->runtime->private_data;
+- switch (params_channels(params)) {
++ ret = 0;
++ switch (substream->runtime->channels) {
+ case TWO_CH:
+ ch_mask = 0x00;
+ break;
+ default:
+ return -EINVAL;
+ }
+- rn_writel(ch_mask, rtd->acp_base + ACP_WOV_PDM_NO_OF_CHANNELS);
+- rn_writel(PDM_DECIMATION_FACTOR, rtd->acp_base +
+- ACP_WOV_PDM_DECIMATION_FACTOR);
+- return 0;
+-}
+-
+-static int acp_pdm_dai_trigger(struct snd_pcm_substream *substream,
+- int cmd, struct snd_soc_dai *dai)
+-{
+- struct pdm_stream_instance *rtd;
+- int ret;
+- bool pdm_status;
+-
+- rtd = substream->runtime->private_data;
+- ret = 0;
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
++ rn_writel(ch_mask, rtd->acp_base + ACP_WOV_PDM_NO_OF_CHANNELS);
++ rn_writel(PDM_DECIMATION_FACTOR, rtd->acp_base +
++ ACP_WOV_PDM_DECIMATION_FACTOR);
+ rtd->bytescount = acp_pdm_get_byte_count(rtd,
+ substream->stream);
+ pdm_status = check_pdm_dma_status(rtd->acp_base);
+@@ -369,7 +359,6 @@ static int acp_pdm_dai_trigger(struct sn
+ }
+
+ static struct snd_soc_dai_ops acp_pdm_dai_ops = {
+- .hw_params = acp_pdm_dai_hw_params,
+ .trigger = acp_pdm_dai_trigger,
+ };
+
--- /dev/null
+From af804b7826350d5af728dca4715e473338fbd7e5 Mon Sep 17 00:00:00 2001
+From: Oleksij Rempel <linux@rempel-privat.de>
+Date: Fri, 7 Aug 2020 12:51:58 +0200
+Subject: can: j1939: socket: j1939_sk_bind(): make sure ml_priv is allocated
+
+From: Oleksij Rempel <o.rempel@pengutronix.de>
+
+commit af804b7826350d5af728dca4715e473338fbd7e5 upstream.
+
+This patch adds check to ensure that the struct net_device::ml_priv is
+allocated, as it is used later by the j1939 stack.
+
+The allocation is done by all mainline CAN network drivers, but when using
+bond or team devices this is not the case.
+
+Bail out if no ml_priv is allocated.
+
+Reported-by: syzbot+f03d384f3455d28833eb@syzkaller.appspotmail.com
+Fixes: 9d71dd0c7009 ("can: add support of SAE J1939 protocol")
+Cc: linux-stable <stable@vger.kernel.org> # >= v5.4
+Signed-off-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Link: https://lore.kernel.org/r/20200807105200.26441-4-o.rempel@pengutronix.de
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/can/j1939/socket.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/net/can/j1939/socket.c
++++ b/net/can/j1939/socket.c
+@@ -466,6 +466,14 @@ static int j1939_sk_bind(struct socket *
+ goto out_release_sock;
+ }
+
++ if (!ndev->ml_priv) {
++ netdev_warn_once(ndev,
++ "No CAN mid layer private allocated, please fix your driver and use alloc_candev()!\n");
++ dev_put(ndev);
++ ret = -ENODEV;
++ goto out_release_sock;
++ }
++
+ priv = j1939_netdev_start(ndev);
+ dev_put(ndev);
+ if (IS_ERR(priv)) {
--- /dev/null
+From cd3b3636c99fcac52c598b64061f3fe4413c6a12 Mon Sep 17 00:00:00 2001
+From: Oleksij Rempel <linux@rempel-privat.de>
+Date: Fri, 7 Aug 2020 12:51:57 +0200
+Subject: can: j1939: transport: j1939_session_tx_dat(): fix use-after-free read in j1939_tp_txtimer()
+
+From: Oleksij Rempel <o.rempel@pengutronix.de>
+
+commit cd3b3636c99fcac52c598b64061f3fe4413c6a12 upstream.
+
+The current stack implementation does not support ECTS requests of
+non-aligned TP-sized blocks.
+
+If ECTS will request a block with size and offset spanning two TP
+blocks, this will cause memcpy() to read beyond the queued skb (which
+does only contain one TP sized block).
+
+Sometimes KASAN will detect this read if the memory region beyond the
+skb was previously allocated and freed. In other situations it will stay
+undetected. The ETP transfer in any case will be corrupted.
+
+This patch adds a sanity check to avoid this kind of read and abort the
+session with error J1939_XTP_ABORT_ECTS_TOO_BIG.
+
+Reported-by: syzbot+5322482fe520b02aea30@syzkaller.appspotmail.com
+Fixes: 9d71dd0c7009 ("can: add support of SAE J1939 protocol")
+Cc: linux-stable <stable@vger.kernel.org> # >= v5.4
+Signed-off-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Link: https://lore.kernel.org/r/20200807105200.26441-3-o.rempel@pengutronix.de
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/can/j1939/transport.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+--- a/net/can/j1939/transport.c
++++ b/net/can/j1939/transport.c
+@@ -787,6 +787,18 @@ static int j1939_session_tx_dat(struct j
+ if (len > 7)
+ len = 7;
+
++ if (offset + len > se_skb->len) {
++ netdev_err_once(priv->ndev,
++ "%s: 0x%p: requested data outside of queued buffer: offset %i, len %i, pkt.tx: %i\n",
++ __func__, session, skcb->offset, se_skb->len , session->pkt.tx);
++ return -EOVERFLOW;
++ }
++
++ if (!len) {
++ ret = -ENOBUFS;
++ break;
++ }
++
+ memcpy(&dat[1], &tpdat[offset], len);
+ ret = j1939_tp_tx_dat(session, dat, len + 1);
+ if (ret < 0) {
+@@ -1120,6 +1132,9 @@ static enum hrtimer_restart j1939_tp_txt
+ * cleanup including propagation of the error to user space.
+ */
+ break;
++ case -EOVERFLOW:
++ j1939_session_cancel(session, J1939_XTP_ABORT_ECTS_TOO_BIG);
++ break;
+ case 0:
+ session->tx_retry = 0;
+ break;
--- /dev/null
+From 79940e4d10df9c737a394630968471c632246ee0 Mon Sep 17 00:00:00 2001
+From: Jaehyun Chung <jaehyun.chung@amd.com>
+Date: Thu, 30 Jul 2020 16:31:29 -0400
+Subject: drm/amd/display: Blank stream before destroying HDCP session
+
+From: Jaehyun Chung <jaehyun.chung@amd.com>
+
+commit 79940e4d10df9c737a394630968471c632246ee0 upstream.
+
+[Why]
+Stream disable sequence incorrectly destroys HDCP session while stream is
+not blanked and while audio is not muted. This sequence causes a flash
+of corruption during mode change and an audio click.
+
+[How]
+Change sequence to blank stream before destroying HDCP session. Audio will
+also be muted by blanking the stream.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Jaehyun Chung <jaehyun.chung@amd.com>
+Reviewed-by: Alvin Lee <Alvin.Lee2@amd.com>
+Acked-by: Qingqing Zhuo <qingqing.zhuo@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -3265,12 +3265,11 @@ void core_link_disable_stream(struct pip
+ core_link_set_avmute(pipe_ctx, true);
+ }
+
++ dc->hwss.blank_stream(pipe_ctx);
+ #if defined(CONFIG_DRM_AMD_DC_HDCP)
+ update_psp_stream_config(pipe_ctx, true);
+ #endif
+
+- dc->hwss.blank_stream(pipe_ctx);
+-
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ deallocate_mst_payload(pipe_ctx);
+
--- /dev/null
+From 8e80d482608a4e6a97c75272ef8b4bcfc5d0c490 Mon Sep 17 00:00:00 2001
+From: Paul Hsieh <paul.hsieh@amd.com>
+Date: Wed, 5 Aug 2020 17:28:37 +0800
+Subject: drm/amd/display: Fix DFPstate hang due to view port changed
+
+From: Paul Hsieh <paul.hsieh@amd.com>
+
+commit 8e80d482608a4e6a97c75272ef8b4bcfc5d0c490 upstream.
+
+[Why]
+Place the cursor in the center of the screen between two pipes, then
+adjust the viewport; the cursor doesn't update, causing a DFPstate hang.
+
+[How]
+If viewport changed, update cursor as well.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Paul Hsieh <paul.hsieh@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+@@ -1386,8 +1386,8 @@ static void dcn20_update_dchubp_dpp(
+
+ /* Any updates are handled in dc interface, just need to apply existing for plane enable */
+ if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
+- pipe_ctx->update_flags.bits.scaler || pipe_ctx->update_flags.bits.viewport)
+- && pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
++ pipe_ctx->update_flags.bits.scaler || viewport_changed == true) &&
++ pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
+ dc->hwss.set_cursor_position(pipe_ctx);
+ dc->hwss.set_cursor_attribute(pipe_ctx);
+
--- /dev/null
+From b24bdc37d03a0478189e20a50286092840f414fa Mon Sep 17 00:00:00 2001
+From: Stylon Wang <stylon.wang@amd.com>
+Date: Tue, 28 Jul 2020 15:10:35 +0800
+Subject: drm/amd/display: Fix EDID parsing after resume from suspend
+
+From: Stylon Wang <stylon.wang@amd.com>
+
+commit b24bdc37d03a0478189e20a50286092840f414fa upstream.
+
+[Why]
+Resuming from suspend, CEA blocks from EDID are not parsed and no video
+modes can support YUV420. When this happens, output bpc cannot go over
+8-bit with 4K modes on HDMI.
+
+[How]
+In amdgpu_dm_update_connector_after_detect(), drm_add_edid_modes() is
+called after drm_connector_update_edid_property() to fully parse EDID
+and update display info.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Stylon Wang <stylon.wang@amd.com>
+Reviewed-by: Nicholas Kazlauskas <Nicholas.Kazlauskas@amd.com>
+Acked-by: Qingqing Zhuo <qingqing.zhuo@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -2184,6 +2184,7 @@ void amdgpu_dm_update_connector_after_de
+
+ drm_connector_update_edid_property(connector,
+ aconnector->edid);
++ drm_add_edid_modes(connector, aconnector->edid);
+
+ if (aconnector->dc_link->aux_mode)
+ drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
--- /dev/null
+From a49f6727e14caff32419cc3002b9ae9cafb750d7 Mon Sep 17 00:00:00 2001
+From: Aric Cyr <aric.cyr@amd.com>
+Date: Mon, 27 Jul 2020 21:21:16 -0400
+Subject: drm/amd/display: Fix incorrect backlight register offset for DCN
+
+From: Aric Cyr <aric.cyr@amd.com>
+
+commit a49f6727e14caff32419cc3002b9ae9cafb750d7 upstream.
+
+[Why]
+Typo in backlight refactor introduced wrong register offset.
+
+[How]
+Change DCE to DCN register map for PWRSEQ_REF_DIV
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Aric Cyr <aric.cyr@amd.com>
+Reviewed-by: Ashley Thomas <Ashley.Thomas2@amd.com>
+Acked-by: Qingqing Zhuo <qingqing.zhuo@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h
+@@ -49,7 +49,7 @@
+ #define DCN_PANEL_CNTL_REG_LIST()\
+ DCN_PANEL_CNTL_SR(PWRSEQ_CNTL, LVTMA), \
+ DCN_PANEL_CNTL_SR(PWRSEQ_STATE, LVTMA), \
+- DCE_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA), \
++ DCN_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA), \
+ SR(BL_PWM_CNTL), \
+ SR(BL_PWM_CNTL2), \
+ SR(BL_PWM_PERIOD_CNTL), \
--- /dev/null
+From d2e59d0ff4c44d1f6f8ed884a5bea7d1bb7fd98c Mon Sep 17 00:00:00 2001
+From: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
+Date: Thu, 6 Aug 2020 17:54:47 -0400
+Subject: drm/amd/display: fix pow() crashing when given base 0
+
+From: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
+
+commit d2e59d0ff4c44d1f6f8ed884a5bea7d1bb7fd98c upstream.
+
+[Why&How]
+pow(a,x) is implemented as exp(x*log(a)). log(0) will crash.
+So return 0^x = 0, unless x=0, convention seems to be 0^0 = 1.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
+Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
+Acked-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/display/include/fixed31_32.h | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
++++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
+@@ -431,6 +431,9 @@ struct fixed31_32 dc_fixpt_log(struct fi
+ */
+ static inline struct fixed31_32 dc_fixpt_pow(struct fixed31_32 arg1, struct fixed31_32 arg2)
+ {
++ if (arg1.value == 0)
++ return arg2.value == 0 ? dc_fixpt_one : dc_fixpt_zero;
++
+ return dc_fixpt_exp(
+ dc_fixpt_mul(
+ dc_fixpt_log(arg1),
--- /dev/null
+From f41ed88cbd6f025f7a683a11a74f901555fba11c Mon Sep 17 00:00:00 2001
+From: Daniel Kolesa <daniel@octaforge.org>
+Date: Sat, 8 Aug 2020 22:42:35 +0200
+Subject: drm/amdgpu/display: use GFP_ATOMIC in dcn20_validate_bandwidth_internal
+
+From: Daniel Kolesa <daniel@octaforge.org>
+
+commit f41ed88cbd6f025f7a683a11a74f901555fba11c upstream.
+
+GFP_KERNEL may and will sleep, and this is being executed in
+a non-preemptible context; this will mess things up since it's
+called in between DC_FP_START/END, and rescheduling will result
+in the DC_FP_END later being called in a different context (or
+just crashing if any floating point/vector registers/instructions
+are used after the call is resumed in a different context).
+
+Signed-off-by: Daniel Kolesa <daniel@octaforge.org>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+@@ -3097,7 +3097,7 @@ static bool dcn20_validate_bandwidth_int
+ int vlevel = 0;
+ int pipe_split_from[MAX_PIPES];
+ int pipe_cnt = 0;
+- display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
++ display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ BW_VAL_TRACE_COUNT();
--- /dev/null
+From 78484d7c747e30468b35bd5f19edf602f50162a7 Mon Sep 17 00:00:00 2001
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Date: Sun, 9 Aug 2020 22:34:06 +0200
+Subject: drm: amdgpu: Use the correct size when allocating memory
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+commit 78484d7c747e30468b35bd5f19edf602f50162a7 upstream.
+
+When '*sgt' is allocated, we must allocate 'sizeof(**sgt)' bytes instead
+of 'sizeof(*sg)'.
+
+The sizeof(*sg) is bigger than sizeof(**sgt) so this wastes memory but
+it won't lead to corruption.
+
+Fixes: f44ffd677fb3 ("drm/amdgpu: add support for exporting VRAM using DMA-buf v3")
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+@@ -465,7 +465,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amd
+ unsigned int pages;
+ int i, r;
+
+- *sgt = kmalloc(sizeof(*sg), GFP_KERNEL);
++ *sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
+ if (!*sgt)
+ return -ENOMEM;
+
--- /dev/null
+From 0b3171b6d195637f84ddf8b59bae818ea20bc8ac Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Wed, 8 Jul 2020 17:35:16 +0200
+Subject: ext4: do not block RWF_NOWAIT dio write on unallocated space
+
+From: Jan Kara <jack@suse.cz>
+
+commit 0b3171b6d195637f84ddf8b59bae818ea20bc8ac upstream.
+
+Since commit 378f32bab371 ("ext4: introduce direct I/O write using iomap
+infrastructure") we don't properly bail out of RWF_NOWAIT direct IO
+write if underlying blocks are not allocated. Also
+ext4_dio_write_checks() does not honor RWF_NOWAIT when re-acquiring
+i_rwsem. Fix both issues.
+
+Fixes: 378f32bab371 ("ext4: introduce direct I/O write using iomap infrastructure")
+Cc: stable@kernel.org
+Reported-by: Filipe Manana <fdmanana@gmail.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ritesh Harjani <riteshh@linux.ibm.com>
+Link: https://lore.kernel.org/r/20200708153516.9507-1-jack@suse.cz
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/file.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -428,6 +428,10 @@ restart:
+ */
+ if (*ilock_shared && (!IS_NOSEC(inode) || *extend ||
+ !ext4_overwrite_io(inode, offset, count))) {
++ if (iocb->ki_flags & IOCB_NOWAIT) {
++ ret = -EAGAIN;
++ goto out;
++ }
+ inode_unlock_shared(inode);
+ *ilock_shared = false;
+ inode_lock(inode);
--- /dev/null
+From 7303cb5bfe845f7d43cd9b2dbd37dbb266efda9b Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Fri, 31 Jul 2020 18:21:35 +0200
+Subject: ext4: fix checking of directory entry validity for inline directories
+
+From: Jan Kara <jack@suse.cz>
+
+commit 7303cb5bfe845f7d43cd9b2dbd37dbb266efda9b upstream.
+
+ext4_search_dir() and ext4_generic_delete_entry() can be called both for
+standard directory blocks and for inline directories stored inside inode
+or inline xattr space. For the second case we didn't call
+ext4_check_dir_entry() with proper constraints that could result in
+accepting corrupted directory entry as well as false positive filesystem
+errors like:
+
+EXT4-fs error (device dm-0): ext4_search_dir:1395: inode #28320400:
+block 113246792: comm dockerd: bad entry in directory: directory entry too
+close to block end - offset=0, inode=28320403, rec_len=32, name_len=8,
+size=4096
+
+Fix the arguments passed to ext4_check_dir_entry().
+
+Fixes: 109ba779d6cc ("ext4: check for directory entries too close to block end")
+CC: stable@vger.kernel.org
+Signed-off-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20200731162135.8080-1-jack@suse.cz
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/namei.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -1396,8 +1396,8 @@ int ext4_search_dir(struct buffer_head *
+ ext4_match(dir, fname, de)) {
+ /* found a match - just to be sure, do
+ * a full check */
+- if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data,
+- bh->b_size, offset))
++ if (ext4_check_dir_entry(dir, NULL, de, bh, search_buf,
++ buf_size, offset))
+ return -1;
+ *res_dir = de;
+ return 1;
+@@ -2472,7 +2472,7 @@ int ext4_generic_delete_entry(handle_t *
+ de = (struct ext4_dir_entry_2 *)entry_buf;
+ while (i < buf_size - csum_size) {
+ if (ext4_check_dir_entry(dir, NULL, de, bh,
+- bh->b_data, bh->b_size, i))
++ entry_buf, buf_size, i))
+ return -EFSCORRUPTED;
+ if (de == de_del) {
+ if (pde)
--- /dev/null
+From b711d4eaf0c408a811311ee3e94d6e9e5a230a9a Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Sun, 16 Aug 2020 08:23:05 -0700
+Subject: io_uring: find and cancel head link async work on files exit
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit b711d4eaf0c408a811311ee3e94d6e9e5a230a9a upstream.
+
+Commit f254ac04c874 ("io_uring: enable lookup of links holding inflight files")
+only handled 2 out of the three head link cases we have, we also need to
+lookup and cancel work that is blocked in io-wq if that work has a link
+that's holding a reference to the files structure.
+
+Put the "cancel head links that hold this request pending" logic into
+io_attempt_cancel(), which will go through the motions of finding and
+canceling head links that hold the current inflight files stable request
+pending.
+
+Cc: stable@vger.kernel.org
+Reported-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/io_uring.c | 33 +++++++++++++++++++++++++++++----
+ 1 file changed, 29 insertions(+), 4 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -7609,6 +7609,33 @@ static bool io_timeout_remove_link(struc
+ return found;
+ }
+
++static bool io_cancel_link_cb(struct io_wq_work *work, void *data)
++{
++ return io_match_link(container_of(work, struct io_kiocb, work), data);
++}
++
++static void io_attempt_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
++{
++ enum io_wq_cancel cret;
++
++ /* cancel this particular work, if it's running */
++ cret = io_wq_cancel_work(ctx->io_wq, &req->work);
++ if (cret != IO_WQ_CANCEL_NOTFOUND)
++ return;
++
++ /* find links that hold this pending, cancel those */
++ cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_link_cb, req, true);
++ if (cret != IO_WQ_CANCEL_NOTFOUND)
++ return;
++
++ /* if we have a poll link holding this pending, cancel that */
++ if (io_poll_remove_link(ctx, req))
++ return;
++
++ /* final option, timeout link is holding this req pending */
++ io_timeout_remove_link(ctx, req);
++}
++
+ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
+ struct files_struct *files)
+ {
+@@ -7665,10 +7692,8 @@ static void io_uring_cancel_files(struct
+ continue;
+ }
+ } else {
+- io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
+- /* could be a link, check and remove if it is */
+- if (!io_poll_remove_link(ctx, cancel_req))
+- io_timeout_remove_link(ctx, cancel_req);
++ /* cancel this request, or head link requests */
++ io_attempt_cancel(ctx, cancel_req);
+ io_put_req(cancel_req);
+ }
+
--- /dev/null
+From ef3f5830b859604eda8723c26d90ab23edc027a4 Mon Sep 17 00:00:00 2001
+From: "zhangyi (F)" <yi.zhang@huawei.com>
+Date: Sat, 20 Jun 2020 14:19:48 +0800
+Subject: jbd2: add the missing unlock_buffer() in the error path of jbd2_write_superblock()
+
+From: zhangyi (F) <yi.zhang@huawei.com>
+
+commit ef3f5830b859604eda8723c26d90ab23edc027a4 upstream.
+
+jbd2_write_superblock() is under the buffer lock of journal superblock
+before ending that superblock write, so add a missing unlock_buffer() in
+in the error path before submitting buffer.
+
+Fixes: 742b06b5628f ("jbd2: check superblock mapped prior to committing")
+Signed-off-by: zhangyi (F) <yi.zhang@huawei.com>
+Reviewed-by: Ritesh Harjani <riteshh@linux.ibm.com>
+Cc: stable@kernel.org
+Link: https://lore.kernel.org/r/20200620061948.2049579-1-yi.zhang@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/jbd2/journal.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -1367,8 +1367,10 @@ static int jbd2_write_superblock(journal
+ int ret;
+
+ /* Buffer got discarded which means block device got invalidated */
+- if (!buffer_mapped(bh))
++ if (!buffer_mapped(bh)) {
++ unlock_buffer(bh);
+ return -EIO;
++ }
+
+ trace_jbd2_write_superblock(journal, write_flags);
+ if (!(journal->j_flags & JBD2_BARRIER))
--- /dev/null
+From 71e843295c680898959b22dc877ae3839cc22470 Mon Sep 17 00:00:00 2001
+From: Wei Yongjun <weiyongjun1@huawei.com>
+Date: Thu, 20 Aug 2020 17:42:14 -0700
+Subject: kernel/relay.c: fix memleak on destroy relay channel
+
+From: Wei Yongjun <weiyongjun1@huawei.com>
+
+commit 71e843295c680898959b22dc877ae3839cc22470 upstream.
+
+kmemleak report memory leak as follows:
+
+ unreferenced object 0x607ee4e5f948 (size 8):
+ comm "syz-executor.1", pid 2098, jiffies 4295031601 (age 288.468s)
+ hex dump (first 8 bytes):
+ 00 00 00 00 00 00 00 00 ........
+ backtrace:
+ relay_open kernel/relay.c:583 [inline]
+ relay_open+0xb6/0x970 kernel/relay.c:563
+ do_blk_trace_setup+0x4a8/0xb20 kernel/trace/blktrace.c:557
+ __blk_trace_setup+0xb6/0x150 kernel/trace/blktrace.c:597
+ blk_trace_ioctl+0x146/0x280 kernel/trace/blktrace.c:738
+ blkdev_ioctl+0xb2/0x6a0 block/ioctl.c:613
+ block_ioctl+0xe5/0x120 fs/block_dev.c:1871
+ vfs_ioctl fs/ioctl.c:48 [inline]
+ __do_sys_ioctl fs/ioctl.c:753 [inline]
+ __se_sys_ioctl fs/ioctl.c:739 [inline]
+ __x64_sys_ioctl+0x170/0x1ce fs/ioctl.c:739
+ do_syscall_64+0x33/0x40 arch/x86/entry/common.c:46
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+'chan->buf' is malloced in relay_open() by alloc_percpu() but not free
+while destroy the relay channel. Fix it by adding free_percpu() before
+return from relay_destroy_channel().
+
+Fixes: 017c59c042d0 ("relay: Use per CPU constructs for the relay channel buffer pointers")
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Michel Lespinasse <walken@google.com>
+Cc: Daniel Axtens <dja@axtens.net>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Akash Goel <akash.goel@intel.com>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/20200817122826.48518-1-weiyongjun1@huawei.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/relay.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/kernel/relay.c
++++ b/kernel/relay.c
+@@ -197,6 +197,7 @@ free_buf:
+ static void relay_destroy_channel(struct kref *kref)
+ {
+ struct rchan *chan = container_of(kref, struct rchan, kref);
++ free_percpu(chan->buf);
+ kfree(chan);
+ }
+
--- /dev/null
+From fdfe7cbd58806522e799e2a50a15aee7f2cbb7b6 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will@kernel.org>
+Date: Tue, 11 Aug 2020 11:27:24 +0100
+Subject: KVM: Pass MMU notifier range flags to kvm_unmap_hva_range()
+
+From: Will Deacon <will@kernel.org>
+
+commit fdfe7cbd58806522e799e2a50a15aee7f2cbb7b6 upstream.
+
+The 'flags' field of 'struct mmu_notifier_range' is used to indicate
+whether invalidate_range_{start,end}() are permitted to block. In the
+case of kvm_mmu_notifier_invalidate_range_start(), this field is not
+forwarded on to the architecture-specific implementation of
+kvm_unmap_hva_range() and therefore the backend cannot sensibly decide
+whether or not to block.
+
+Add an extra 'flags' parameter to kvm_unmap_hva_range() so that
+architectures are aware as to whether or not they are permitted to block.
+
+Cc: <stable@vger.kernel.org>
+Cc: Marc Zyngier <maz@kernel.org>
+Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+Message-Id: <20200811102725.7121-2-will@kernel.org>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/kvm_host.h | 2 +-
+ arch/arm64/kvm/mmu.c | 2 +-
+ arch/mips/include/asm/kvm_host.h | 2 +-
+ arch/mips/kvm/mmu.c | 3 ++-
+ arch/powerpc/include/asm/kvm_host.h | 3 ++-
+ arch/powerpc/kvm/book3s.c | 3 ++-
+ arch/powerpc/kvm/e500_mmu_host.c | 3 ++-
+ arch/x86/include/asm/kvm_host.h | 3 ++-
+ arch/x86/kvm/mmu/mmu.c | 3 ++-
+ virt/kvm/kvm_main.c | 3 ++-
+ 10 files changed, 17 insertions(+), 10 deletions(-)
+
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -443,7 +443,7 @@ int __kvm_arm_vcpu_set_events(struct kvm
+
+ #define KVM_ARCH_WANT_MMU_NOTIFIER
+ int kvm_unmap_hva_range(struct kvm *kvm,
+- unsigned long start, unsigned long end);
++ unsigned long start, unsigned long end, unsigned flags);
+ int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+ int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
+--- a/arch/arm64/kvm/mmu.c
++++ b/arch/arm64/kvm/mmu.c
+@@ -2203,7 +2203,7 @@ static int kvm_unmap_hva_handler(struct
+ }
+
+ int kvm_unmap_hva_range(struct kvm *kvm,
+- unsigned long start, unsigned long end)
++ unsigned long start, unsigned long end, unsigned flags)
+ {
+ if (!kvm->arch.pgd)
+ return 0;
+--- a/arch/mips/include/asm/kvm_host.h
++++ b/arch/mips/include/asm/kvm_host.h
+@@ -981,7 +981,7 @@ enum kvm_mips_fault_result kvm_trap_emul
+
+ #define KVM_ARCH_WANT_MMU_NOTIFIER
+ int kvm_unmap_hva_range(struct kvm *kvm,
+- unsigned long start, unsigned long end);
++ unsigned long start, unsigned long end, unsigned flags);
+ int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+ int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
+--- a/arch/mips/kvm/mmu.c
++++ b/arch/mips/kvm/mmu.c
+@@ -518,7 +518,8 @@ static int kvm_unmap_hva_handler(struct
+ return 1;
+ }
+
+-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
++int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
++ unsigned flags)
+ {
+ handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
+
+--- a/arch/powerpc/include/asm/kvm_host.h
++++ b/arch/powerpc/include/asm/kvm_host.h
+@@ -58,7 +58,8 @@
+ #define KVM_ARCH_WANT_MMU_NOTIFIER
+
+ extern int kvm_unmap_hva_range(struct kvm *kvm,
+- unsigned long start, unsigned long end);
++ unsigned long start, unsigned long end,
++ unsigned flags);
+ extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+ extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
+ extern int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+--- a/arch/powerpc/kvm/book3s.c
++++ b/arch/powerpc/kvm/book3s.c
+@@ -834,7 +834,8 @@ void kvmppc_core_commit_memory_region(st
+ kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change);
+ }
+
+-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
++int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
++ unsigned flags)
+ {
+ return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
+ }
+--- a/arch/powerpc/kvm/e500_mmu_host.c
++++ b/arch/powerpc/kvm/e500_mmu_host.c
+@@ -734,7 +734,8 @@ static int kvm_unmap_hva(struct kvm *kvm
+ return 0;
+ }
+
+-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
++int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
++ unsigned flags)
+ {
+ /* kvm_unmap_hva flushes everything anyways */
+ kvm_unmap_hva(kvm, start);
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1641,7 +1641,8 @@ asmlinkage void kvm_spurious_fault(void)
+ _ASM_EXTABLE(666b, 667b)
+
+ #define KVM_ARCH_WANT_MMU_NOTIFIER
+-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
++int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
++ unsigned flags);
+ int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
+ int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -1971,7 +1971,8 @@ static int kvm_handle_hva(struct kvm *kv
+ return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
+ }
+
+-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
++int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
++ unsigned flags)
+ {
+ return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
+ }
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -427,7 +427,8 @@ static int kvm_mmu_notifier_invalidate_r
+ * count is also read inside the mmu_lock critical section.
+ */
+ kvm->mmu_notifier_count++;
+- need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end);
++ need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end,
++ range->flags);
+ need_tlb_flush |= kvm->tlbs_dirty;
+ /* we've to flush the tlb before the pages can be freed */
+ if (need_tlb_flush)
--- /dev/null
+From e08d3fdfe2dafa0331843f70ce1ff6c1c4900bf4 Mon Sep 17 00:00:00 2001
+From: Doug Berger <opendmb@gmail.com>
+Date: Thu, 20 Aug 2020 17:42:24 -0700
+Subject: mm: include CMA pages in lowmem_reserve at boot
+
+From: Doug Berger <opendmb@gmail.com>
+
+commit e08d3fdfe2dafa0331843f70ce1ff6c1c4900bf4 upstream.
+
+The lowmem_reserve arrays provide a means of applying pressure against
+allocations from lower zones that were targeted at higher zones. Its
+values are a function of the number of pages managed by higher zones and
+are assigned by a call to the setup_per_zone_lowmem_reserve() function.
+
+The function is initially called at boot time by the function
+init_per_zone_wmark_min() and may be called later by accesses of the
+/proc/sys/vm/lowmem_reserve_ratio sysctl file.
+
+The function init_per_zone_wmark_min() was moved up from a module_init to
+a core_initcall to resolve a sequencing issue with khugepaged.
+Unfortunately this created a sequencing issue with CMA page accounting.
+
+The CMA pages are added to the managed page count of a zone when
+cma_init_reserved_areas() is called at boot also as a core_initcall. This
+makes it uncertain whether the CMA pages will be added to the managed page
+counts of their zones before or after the call to
+init_per_zone_wmark_min() as it becomes dependent on link order. With the
+current link order the pages are added to the managed count after the
+lowmem_reserve arrays are initialized at boot.
+
+This means the lowmem_reserve values at boot may be lower than the values
+used later if /proc/sys/vm/lowmem_reserve_ratio is accessed even if the
+ratio values are unchanged.
+
+In many cases the difference is not significant, but for example
+an ARM platform with 1GB of memory and the following memory layout
+
+ cma: Reserved 256 MiB at 0x0000000030000000
+ Zone ranges:
+ DMA [mem 0x0000000000000000-0x000000002fffffff]
+ Normal empty
+ HighMem [mem 0x0000000030000000-0x000000003fffffff]
+
+would result in 0 lowmem_reserve for the DMA zone. This would allow
+userspace to deplete the DMA zone easily.
+
+Funnily enough
+
+ $ cat /proc/sys/vm/lowmem_reserve_ratio
+
+would fix up the situation because as a side effect it forces
+setup_per_zone_lowmem_reserve.
+
+This commit breaks the link order dependency by invoking
+init_per_zone_wmark_min() as a postcore_initcall so that the CMA pages
+have the chance to be properly accounted in their zone(s) and allowing
+the lowmem_reserve arrays to receive consistent values.
+
+Fixes: bc22af74f271 ("mm: update min_free_kbytes from khugepaged after core initialization")
+Signed-off-by: Doug Berger <opendmb@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Cc: Jason Baron <jbaron@akamai.com>
+Cc: David Rientjes <rientjes@google.com>
+Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/1597423766-27849-1-git-send-email-opendmb@gmail.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/page_alloc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -7881,7 +7881,7 @@ int __meminit init_per_zone_wmark_min(vo
+
+ return 0;
+ }
+-core_initcall(init_per_zone_wmark_min)
++postcore_initcall(init_per_zone_wmark_min)
+
+ /*
+ * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
--- /dev/null
+From b7333b58f358f38d90d78e00c1ee5dec82df10ad Mon Sep 17 00:00:00 2001
+From: Yang Shi <shy828301@gmail.com>
+Date: Fri, 14 Aug 2020 21:30:41 -0700
+Subject: mm/memory.c: skip spurious TLB flush for retried page fault
+
+From: Yang Shi <shy828301@gmail.com>
+
+commit b7333b58f358f38d90d78e00c1ee5dec82df10ad upstream.
+
+Recently we found regression when running will_it_scale/page_fault3 test
+on ARM64. Over 70% down for the multi processes cases and over 20% down
+for the multi threads cases. It turns out the regression is caused by
+commit 89b15332af7c ("mm: drop mmap_sem before calling
+balance_dirty_pages() in write fault").
+
+The test mmaps a memory size file then write to the mapping, this would
+make all memory dirty and trigger dirty pages throttle, that upstream
+commit would release mmap_sem then retry the page fault. The retried
+page fault would see correct PTEs installed then just fall through to
+spurious TLB flush. The regression is caused by the excessive spurious
+TLB flush. It is fine on x86 since x86's spurious TLB flush is no-op.
+
+We could just skip the spurious TLB flush to mitigate the regression.
+
+Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
+Reported-by: Xu Yu <xuyu@linux.alibaba.com>
+Debugged-by: Xu Yu <xuyu@linux.alibaba.com>
+Tested-by: Xu Yu <xuyu@linux.alibaba.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Yang Shi <shy828301@gmail.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/memory.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -4248,6 +4248,9 @@ static vm_fault_t handle_pte_fault(struc
+ vmf->flags & FAULT_FLAG_WRITE)) {
+ update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
+ } else {
++ /* Skip spurious TLB flush for retried page fault */
++ if (vmf->flags & FAULT_FLAG_TRIED)
++ goto unlock;
+ /*
+ * This is needed only for protection faults but the arch code
+ * is not yet telling us if this is a protection fault or not.
--- /dev/null
+From 88e8ac11d2ea3acc003cf01bb5a38c8aa76c3cfd Mon Sep 17 00:00:00 2001
+From: Charan Teja Reddy <charante@codeaurora.org>
+Date: Thu, 20 Aug 2020 17:42:27 -0700
+Subject: mm, page_alloc: fix core hung in free_pcppages_bulk()
+
+From: Charan Teja Reddy <charante@codeaurora.org>
+
+commit 88e8ac11d2ea3acc003cf01bb5a38c8aa76c3cfd upstream.
+
+The following race is observed with the repeated online, offline and a
+delay between two successive online of memory blocks of movable zone.
+
+P1 P2
+
+Online the first memory block in
+the movable zone. The pcp struct
+values are initialized to default
+values,i.e., pcp->high = 0 &
+pcp->batch = 1.
+
+ Allocate the pages from the
+ movable zone.
+
+Try to Online the second memory
+block in the movable zone thus it
+entered the online_pages() but yet
+to call zone_pcp_update().
+ This process is entered into
+ the exit path thus it tries
+ to release the order-0 pages
+ to pcp lists through
+ free_unref_page_commit().
+ As pcp->high = 0, pcp->count = 1
+ proceed to call the function
+ free_pcppages_bulk().
+Update the pcp values thus the
+new pcp values are like, say,
+pcp->high = 378, pcp->batch = 63.
+ Read the pcp's batch value using
+ READ_ONCE() and pass the same to
+ free_pcppages_bulk(), pcp values
+ passed here are, batch = 63,
+ count = 1.
+
+ Since num of pages in the pcp
+ lists are less than ->batch,
+ then it will stuck in
+ while(list_empty(list)) loop
+ with interrupts disabled thus
+ a core hung.
+
+Avoid this by ensuring free_pcppages_bulk() is called with proper count of
+pcp list pages.
+
+The mentioned race is somewhat easily reproducible without [1] because
+pcp's are not updated for the first memory block online and thus there is
+enough of a race window for P2 between alloc+free and pcp struct values
+update through onlining of second memory block.
+
+With [1], the race still exists but it is very narrow as we update the pcp
+struct values for the first memory block online itself.
+
+This is not limited to the movable zone, it could also happen in cases
+with the normal zone (e.g., hotplug to a node that only has DMA memory, or
+no other memory yet).
+
+[1]: https://patchwork.kernel.org/patch/11696389/
+
+Fixes: 5f8dcc21211a ("page-allocator: split per-cpu list into one-list-per-migrate-type")
+Signed-off-by: Charan Teja Reddy <charante@codeaurora.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Acked-by: David Hildenbrand <david@redhat.com>
+Acked-by: David Rientjes <rientjes@google.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Vinayak Menon <vinmenon@codeaurora.org>
+Cc: <stable@vger.kernel.org> [2.6+]
+Link: http://lkml.kernel.org/r/1597150703-19003-1-git-send-email-charante@codeaurora.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/page_alloc.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1306,6 +1306,11 @@ static void free_pcppages_bulk(struct zo
+ struct page *page, *tmp;
+ LIST_HEAD(head);
+
++ /*
++ * Ensure proper count is passed which otherwise would stuck in the
++ * below while (list_empty(list)) loop.
++ */
++ count = min(pcp->count, count);
+ while (count) {
+ struct list_head *list;
+
--- /dev/null
+From e47110e90584a22e9980510b00d0dfad3a83354e Mon Sep 17 00:00:00 2001
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
+Date: Thu, 20 Aug 2020 17:42:05 -0700
+Subject: mm/vunmap: add cond_resched() in vunmap_pmd_range
+
+From: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+
+commit e47110e90584a22e9980510b00d0dfad3a83354e upstream.
+
+Like zap_pte_range add cond_resched so that we can avoid softlockups as
+reported below. On non-preemptible kernel with large I/O map region (like
+the one we get when using persistent memory with sector mode), an unmap of
+the namespace can report below softlockups.
+
+22724.027334] watchdog: BUG: soft lockup - CPU#49 stuck for 23s! [ndctl:50777]
+ NIP [c0000000000dc224] plpar_hcall+0x38/0x58
+ LR [c0000000000d8898] pSeries_lpar_hpte_invalidate+0x68/0xb0
+ Call Trace:
+ flush_hash_page+0x114/0x200
+ hpte_need_flush+0x2dc/0x540
+ vunmap_page_range+0x538/0x6f0
+ free_unmap_vmap_area+0x30/0x70
+ remove_vm_area+0xfc/0x140
+ __vunmap+0x68/0x270
+ __iounmap.part.0+0x34/0x60
+ memunmap+0x54/0x70
+ release_nodes+0x28c/0x300
+ device_release_driver_internal+0x16c/0x280
+ unbind_store+0x124/0x170
+ drv_attr_store+0x44/0x60
+ sysfs_kf_write+0x64/0x90
+ kernfs_fop_write+0x1b0/0x290
+ __vfs_write+0x3c/0x70
+ vfs_write+0xd8/0x260
+ ksys_write+0xdc/0x130
+ system_call+0x5c/0x70
+
+Reported-by: Harish Sriram <harish@linux.ibm.com>
+Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/20200807075933.310240-1-aneesh.kumar@linux.ibm.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/vmalloc.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -102,6 +102,8 @@ static void vunmap_pmd_range(pud_t *pud,
+ if (pmd_none_or_clear_bad(pmd))
+ continue;
+ vunmap_pte_range(pmd, addr, next, mask);
++
++ cond_resched();
+ } while (pmd++, addr = next, addr != end);
+ }
+
--- /dev/null
+From a4501bac0e553bed117b7e1b166d49731caf7260 Mon Sep 17 00:00:00 2001
+From: Rajendra Nayak <rnayak@codeaurora.org>
+Date: Mon, 10 Aug 2020 12:36:19 +0530
+Subject: opp: Enable resources again if they were disabled earlier
+
+From: Rajendra Nayak <rnayak@codeaurora.org>
+
+commit a4501bac0e553bed117b7e1b166d49731caf7260 upstream.
+
+dev_pm_opp_set_rate() can now be called with freq = 0 in order
+to either drop performance or bandwidth votes or to disable
+regulators on platforms which support them.
+
+In such cases, a subsequent call to dev_pm_opp_set_rate() with
+the same frequency ends up returning early because 'old_freq == freq'
+
+Instead make it fall through and put back the dropped performance
+and bandwidth votes and/or enable back the regulators.
+
+Cc: v5.3+ <stable@vger.kernel.org> # v5.3+
+Fixes: cd7ea582866f ("opp: Make dev_pm_opp_set_rate() handle freq = 0 to drop performance votes")
+Reported-by: Sajida Bhanu <sbhanu@codeaurora.org>
+Reviewed-by: Sibi Sankar <sibis@codeaurora.org>
+Reported-by: Matthias Kaehlcke <mka@chromium.org>
+Tested-by: Matthias Kaehlcke <mka@chromium.org>
+Reviewed-by: Stephen Boyd <sboyd@kernel.org>
+Signed-off-by: Rajendra Nayak <rnayak@codeaurora.org>
+[ Viresh: Don't skip clk_set_rate() and massaged changelog ]
+Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/opp/core.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+--- a/drivers/opp/core.c
++++ b/drivers/opp/core.c
+@@ -901,10 +901,13 @@ int dev_pm_opp_set_rate(struct device *d
+
+ /* Return early if nothing to do */
+ if (old_freq == freq) {
+- dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
+- __func__, freq);
+- ret = 0;
+- goto put_opp_table;
++ if (!opp_table->required_opp_tables && !opp_table->regulators &&
++ !opp_table->paths) {
++ dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
++ __func__, freq);
++ ret = 0;
++ goto put_opp_table;
++ }
+ }
+
+ /*
--- /dev/null
+From 8979ef70850eb469e1094279259d1ef393ffe85f Mon Sep 17 00:00:00 2001
+From: Stephen Boyd <swboyd@chromium.org>
+Date: Tue, 11 Aug 2020 14:28:36 -0700
+Subject: opp: Put opp table in dev_pm_opp_set_rate() for empty tables
+
+From: Stephen Boyd <swboyd@chromium.org>
+
+commit 8979ef70850eb469e1094279259d1ef393ffe85f upstream.
+
+We get the opp_table pointer at the top of the function and so we should
+put the pointer at the end of the function like all other exit paths
+from this function do.
+
+Cc: v5.7+ <stable@vger.kernel.org> # v5.7+
+Fixes: aca48b61f963 ("opp: Manage empty OPP tables with clk handle")
+Reviewed-by: Rajendra Nayak <rnayak@codeaurora.org>
+Signed-off-by: Stephen Boyd <swboyd@chromium.org>
+[ Viresh: Split the patch into two ]
+Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/opp/core.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/opp/core.c
++++ b/drivers/opp/core.c
+@@ -862,8 +862,10 @@ int dev_pm_opp_set_rate(struct device *d
+ * have OPP table for the device, while others don't and
+ * opp_set_rate() just needs to behave like clk_set_rate().
+ */
+- if (!_get_opp_count(opp_table))
+- return 0;
++ if (!_get_opp_count(opp_table)) {
++ ret = 0;
++ goto put_opp_table;
++ }
+
+ if (!opp_table->required_opp_tables && !opp_table->regulators &&
+ !opp_table->paths) {
--- /dev/null
+From d4ec88d205583ac4f9482cf3e89128589bd881d2 Mon Sep 17 00:00:00 2001
+From: Stephen Boyd <swboyd@chromium.org>
+Date: Tue, 11 Aug 2020 14:28:36 -0700
+Subject: opp: Put opp table in dev_pm_opp_set_rate() if _set_opp_bw() fails
+
+From: Stephen Boyd <swboyd@chromium.org>
+
+commit d4ec88d205583ac4f9482cf3e89128589bd881d2 upstream.
+
+We get the opp_table pointer at the top of the function and so we should
+put the pointer at the end of the function like all other exit paths
+from this function do.
+
+Cc: v5.8+ <stable@vger.kernel.org> # v5.8+
+Fixes: b00e667a6d8b ("opp: Remove bandwidth votes when target_freq is zero")
+Reviewed-by: Rajendra Nayak <rnayak@codeaurora.org>
+Signed-off-by: Stephen Boyd <swboyd@chromium.org>
+[ Viresh: Split the patch into two ]
+Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/opp/core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/opp/core.c
++++ b/drivers/opp/core.c
+@@ -876,7 +876,7 @@ int dev_pm_opp_set_rate(struct device *d
+
+ ret = _set_opp_bw(opp_table, NULL, dev, true);
+ if (ret)
+- return ret;
++ goto put_opp_table;
+
+ if (opp_table->regulator_enabled) {
+ regulator_disable(opp_table->regulators[0]);
--- /dev/null
+From b25e8e85e75a61af1ddc88c4798387dd3132dd43 Mon Sep 17 00:00:00 2001
+From: Kaike Wan <kaike.wan@intel.com>
+Date: Tue, 11 Aug 2020 13:49:31 -0400
+Subject: RDMA/hfi1: Correct an interlock issue for TID RDMA WRITE request
+
+From: Kaike Wan <kaike.wan@intel.com>
+
+commit b25e8e85e75a61af1ddc88c4798387dd3132dd43 upstream.
+
+The following message occurs when running an AI application with TID RDMA
+enabled:
+
+hfi1 0000:7f:00.0: hfi1_0: [QP74] hfi1_tid_timeout 4084
+hfi1 0000:7f:00.0: hfi1_0: [QP70] hfi1_tid_timeout 4084
+
+The issue happens when TID RDMA WRITE request is followed by an
+IB_WR_RDMA_WRITE_WITH_IMM request, the latter could be completed first on
+the responder side. As a result, no ACK packet for the latter could be
+sent because the TID RDMA WRITE request is still being processed on the
+responder side.
+
+When the TID RDMA WRITE request is eventually completed, the requester
+will wait for the IB_WR_RDMA_WRITE_WITH_IMM request to be acknowledged.
+
+If the next request is another TID RDMA WRITE request, no TID RDMA WRITE
+DATA packet could be sent because the preceding IB_WR_RDMA_WRITE_WITH_IMM
+request is not completed yet.
+
+Consequently the IB_WR_RDMA_WRITE_WITH_IMM will be retried but it will be
+ignored on the responder side because the responder thinks it has already
+been completed. Eventually the retry will be exhausted and the qp will be
+put into error state on the requester side. On the responder side, the TID
+resource timer will eventually expire because no TID RDMA WRITE DATA
+packets will be received for the second TID RDMA WRITE request. There is
+also risk of a write-after-write memory corruption due to the issue.
+
+Fix by adding a requester side interlock to prevent any potential data
+corruption and TID RDMA protocol error.
+
+Fixes: a0b34f75ec20 ("IB/hfi1: Add interlock between a TID RDMA request and other requests")
+Link: https://lore.kernel.org/r/20200811174931.191210.84093.stgit@awfm-01.aw.intel.com
+Cc: <stable@vger.kernel.org> # 5.4.x+
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/hfi1/tid_rdma.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
++++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
+@@ -3215,6 +3215,7 @@ bool hfi1_tid_rdma_wqe_interlock(struct
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ case IB_WR_RDMA_WRITE:
++ case IB_WR_RDMA_WRITE_WITH_IMM:
+ switch (prev->wr.opcode) {
+ case IB_WR_TID_RDMA_WRITE:
+ req = wqe_to_tid_req(prev);
--- /dev/null
+From bcf85fcedfdd17911982a3e3564fcfec7b01eebd Mon Sep 17 00:00:00 2001
+From: Jann Horn <jannh@google.com>
+Date: Thu, 20 Aug 2020 17:42:11 -0700
+Subject: romfs: fix uninitialized memory leak in romfs_dev_read()
+
+From: Jann Horn <jannh@google.com>
+
+commit bcf85fcedfdd17911982a3e3564fcfec7b01eebd upstream.
+
+romfs has a superblock field that limits the size of the filesystem; data
+beyond that limit is never accessed.
+
+romfs_dev_read() fetches a caller-supplied number of bytes from the
+backing device. It returns 0 on success or an error code on failure;
+therefore, its API can't represent short reads, it's all-or-nothing.
+
+However, when romfs_dev_read() detects that the requested operation would
+cross the filesystem size limit, it currently silently truncates the
+requested number of bytes. This e.g. means that when the content of a
+file with size 0x1000 starts one byte before the filesystem size limit,
+->readpage() will only fill a single byte of the supplied page while
+leaving the rest uninitialized, leaking that uninitialized memory to
+userspace.
+
+Fix it by returning an error code instead of truncating the read when the
+requested read operation would go beyond the end of the filesystem.
+
+Fixes: da4458bda237 ("NOMMU: Make it possible for RomFS to use MTD devices directly")
+Signed-off-by: Jann Horn <jannh@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: David Howells <dhowells@redhat.com>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/20200818013202.2246365-1-jannh@google.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/romfs/storage.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/fs/romfs/storage.c
++++ b/fs/romfs/storage.c
+@@ -217,10 +217,8 @@ int romfs_dev_read(struct super_block *s
+ size_t limit;
+
+ limit = romfs_maxsize(sb);
+- if (pos >= limit)
++ if (pos >= limit || buflen > limit - pos)
+ return -EIO;
+- if (buflen > limit - pos)
+- buflen = limit - pos;
+
+ #ifdef CONFIG_ROMFS_ON_MTD
+ if (sb->s_mtd)
--- /dev/null
+From b97bf44f99155e57088e16974afb1f2d7b5287aa Mon Sep 17 00:00:00 2001
+From: Niklas Schnelle <schnelle@linux.ibm.com>
+Date: Mon, 3 Aug 2020 17:58:10 +0200
+Subject: s390/pci: fix PF/VF linking on hot plug
+
+From: Niklas Schnelle <schnelle@linux.ibm.com>
+
+commit b97bf44f99155e57088e16974afb1f2d7b5287aa upstream.
+
+Currently there are four places in which a PCI function is scanned
+and made available to drivers:
+ 1. In pci_scan_root_bus() as part of the initial zbus
+ creation.
+ 2. In zpci_bus_add_devices() when registering
+ a device in configured state on a zbus that has already been
+ scanned.
+ 3. When a function is already known to zPCI (in reserved/standby state)
+ and configuration is triggered through firmware by PEC 0x301.
+ 4. When a device is already known to zPCI (in standby/reserved state)
+ and configuration is triggered from within Linux using
+ enable_slot().
+
+The PF/VF linking step and setting of pdev->is_virtfn introduced with
+commit e5794cf1a270 ("s390/pci: create links between PFs and VFs") was
+only triggered for the second case, which is where VFs created through
+sriov_numvfs usually land. However, unlike on some other platforms (but
+as on POWER), VFs can be individually enabled/disabled through
+/sys/bus/pci/slots.
+
+Fix this by doing VF setup as part of pcibios_bus_add_device() which is
+called in all of the above cases.
+
+Finally to remove the PF/VF links call the common code
+pci_iov_remove_virtfn() function to remove linked VFs.
+This takes care of the necessary sysfs cleanup.
+
+Fixes: e5794cf1a270 ("s390/pci: create links between PFs and VFs")
+Cc: <stable@vger.kernel.org> # 5.8: 2f0230b2f2d5: s390/pci: re-introduce zpci_remove_device()
+Cc: <stable@vger.kernel.org> # 5.8
+Acked-by: Pierre Morel <pmorel@linux.ibm.com>
+Signed-off-by: Niklas Schnelle <schnelle@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/pci/pci.c | 5 ++++-
+ arch/s390/pci/pci_bus.c | 27 +++++++++++++++------------
+ arch/s390/pci/pci_bus.h | 13 +++++++++++++
+ 3 files changed, 32 insertions(+), 13 deletions(-)
+
+--- a/arch/s390/pci/pci.c
++++ b/arch/s390/pci/pci.c
+@@ -678,8 +678,11 @@ void zpci_remove_device(struct zpci_dev
+ struct pci_dev *pdev;
+
+ pdev = pci_get_slot(zbus->bus, zdev->devfn);
+- if (pdev)
++ if (pdev) {
++ if (pdev->is_virtfn)
++ return zpci_remove_virtfn(pdev, zdev->vfn);
+ pci_stop_and_remove_bus_device_locked(pdev);
++ }
+ }
+
+ int zpci_create_device(struct zpci_dev *zdev)
+--- a/arch/s390/pci/pci_bus.c
++++ b/arch/s390/pci/pci_bus.c
+@@ -189,6 +189,19 @@ static inline int zpci_bus_setup_virtfn(
+ }
+ #endif
+
++void pcibios_bus_add_device(struct pci_dev *pdev)
++{
++ struct zpci_dev *zdev = to_zpci(pdev);
++
++ /*
++ * With pdev->no_vf_scan the common PCI probing code does not
++ * perform PF/VF linking.
++ */
++ if (zdev->vfn)
++ zpci_bus_setup_virtfn(zdev->zbus, pdev, zdev->vfn);
++
++}
++
+ static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
+ {
+ struct pci_bus *bus;
+@@ -219,20 +232,10 @@ static int zpci_bus_add_device(struct zp
+ }
+
+ pdev = pci_scan_single_device(bus, zdev->devfn);
+- if (pdev) {
+- if (!zdev->is_physfn) {
+- rc = zpci_bus_setup_virtfn(zbus, pdev, zdev->vfn);
+- if (rc)
+- goto failed_with_pdev;
+- }
++ if (pdev)
+ pci_bus_add_device(pdev);
+- }
+- return 0;
+
+-failed_with_pdev:
+- pci_stop_and_remove_bus_device(pdev);
+- pci_dev_put(pdev);
+- return rc;
++ return 0;
+ }
+
+ static void zpci_bus_add_devices(struct zpci_bus *zbus)
+--- a/arch/s390/pci/pci_bus.h
++++ b/arch/s390/pci/pci_bus.h
+@@ -29,3 +29,16 @@ static inline struct zpci_dev *get_zdev_
+
+ return (devfn >= ZPCI_FUNCTIONS_PER_BUS) ? NULL : zbus->function[devfn];
+ }
++
++#ifdef CONFIG_PCI_IOV
++static inline void zpci_remove_virtfn(struct pci_dev *pdev, int vfn)
++{
++
++ pci_lock_rescan_remove();
++ /* Linux' vfid's start at 0 vfn at 1 */
++ pci_iov_remove_virtfn(pdev->physfn, vfn - 1);
++ pci_unlock_rescan_remove();
++}
++#else /* CONFIG_PCI_IOV */
++static inline void zpci_remove_virtfn(struct pci_dev *pdev, int vfn) {}
++#endif /* CONFIG_PCI_IOV */
--- /dev/null
+From 3cddb79afc60bcdb5fd9dd7a1c64a8d03bdd460f Mon Sep 17 00:00:00 2001
+From: Niklas Schnelle <schnelle@linux.ibm.com>
+Date: Mon, 3 Aug 2020 09:33:29 +0200
+Subject: s390/pci: fix zpci_bus_link_virtfn()
+
+From: Niklas Schnelle <schnelle@linux.ibm.com>
+
+commit 3cddb79afc60bcdb5fd9dd7a1c64a8d03bdd460f upstream.
+
+We were missing the pci_dev_put() for candidate PFs. Furthermore, in
+discussion with upstream it turns out that somewhat counterintuitively
+some common code, in particular the vfio-pci driver, assumes that
+pdev->is_virtfn always implies that pdev->physfn is set, i.e. that VFs
+are always linked.
+While POWER does seem to set pdev->is_virtfn even for unlinked functions
+(see comments in arch/powerpc/kernel/eeh.c:eeh_debugfs_break_device())
+for now just be safe and only set pdev->is_virtfn on linking.
+Also make sure that we only search for parent PFs if the zbus is
+multifunction and we thus know the devfn values supplied by firmware
+come from the RID.
+
+Fixes: e5794cf1a270 ("s390/pci: create links between PFs and VFs")
+Cc: <stable@vger.kernel.org> # 5.8
+Reviewed-by: Pierre Morel <pmorel@linux.ibm.com>
+Signed-off-by: Niklas Schnelle <schnelle@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/pci/pci_bus.c | 25 +++++++++++++++----------
+ 1 file changed, 15 insertions(+), 10 deletions(-)
+
+--- a/arch/s390/pci/pci_bus.c
++++ b/arch/s390/pci/pci_bus.c
+@@ -132,13 +132,14 @@ static int zpci_bus_link_virtfn(struct p
+ {
+ int rc;
+
+- virtfn->physfn = pci_dev_get(pdev);
+ rc = pci_iov_sysfs_link(pdev, virtfn, vfid);
+- if (rc) {
+- pci_dev_put(pdev);
+- virtfn->physfn = NULL;
++ if (rc)
+ return rc;
+- }
++
++ virtfn->is_virtfn = 1;
++ virtfn->multifunction = 0;
++ virtfn->physfn = pci_dev_get(pdev);
++
+ return 0;
+ }
+
+@@ -151,9 +152,9 @@ static int zpci_bus_setup_virtfn(struct
+ int vfid = vfn - 1; /* Linux' vfid's start at 0 vfn at 1*/
+ int rc = 0;
+
+- virtfn->is_virtfn = 1;
+- virtfn->multifunction = 0;
+- WARN_ON(vfid < 0);
++ if (!zbus->multifunction)
++ return 0;
++
+ /* If the parent PF for the given VF is also configured in the
+ * instance, it must be on the same zbus.
+ * We can then identify the parent PF by checking what
+@@ -165,11 +166,17 @@ static int zpci_bus_setup_virtfn(struct
+ zdev = zbus->function[i];
+ if (zdev && zdev->is_physfn) {
+ pdev = pci_get_slot(zbus->bus, zdev->devfn);
++ if (!pdev)
++ continue;
+ cand_devfn = pci_iov_virtfn_devfn(pdev, vfid);
+ if (cand_devfn == virtfn->devfn) {
+ rc = zpci_bus_link_virtfn(pdev, virtfn, vfid);
++ /* balance pci_get_slot() */
++ pci_dev_put(pdev);
+ break;
+ }
++ /* balance pci_get_slot() */
++ pci_dev_put(pdev);
+ }
+ }
+ return rc;
+@@ -178,8 +185,6 @@ static int zpci_bus_setup_virtfn(struct
+ static inline int zpci_bus_setup_virtfn(struct zpci_bus *zbus,
+ struct pci_dev *virtfn, int vfn)
+ {
+- virtfn->is_virtfn = 1;
+- virtfn->multifunction = 0;
+ return 0;
+ }
+ #endif
--- /dev/null
+From b76fee1bc56c31a9d2a49592810eba30cc06d61a Mon Sep 17 00:00:00 2001
+From: Niklas Schnelle <schnelle@linux.ibm.com>
+Date: Tue, 4 Aug 2020 13:01:26 +0200
+Subject: s390/pci: ignore stale configuration request event
+
+From: Niklas Schnelle <schnelle@linux.ibm.com>
+
+commit b76fee1bc56c31a9d2a49592810eba30cc06d61a upstream.
+
+A configuration request event may be stale, that is the event
+may reference a zdev which was already configured.
+This can happen when a hotplug happens during boot such that
+the device is discovered and configured in the initial clp_list_pci(),
+then after initialization we enable events and process
+the original configuration request which additionally still contains
+the old disabled function handle leading to a failure during device
+enablement and subsequent I/O lockout.
+
+Fix this by restoring the check that the device to be configured is in
+standby which was removed in commit f606b3ef47c9 ("s390/pci: adapt events
+for zbus").
+
+This check does not need serialization as we only enable the events after
+zPCI has fully initialized, which includes the initial clp_list_pci(),
+rescan only does updates and events are serialized with respect to each
+other.
+
+Fixes: f606b3ef47c9 ("s390/pci: adapt events for zbus")
+Cc: <stable@vger.kernel.org> # 5.8
+Reported-by: Shalini Chellathurai Saroja <shalini@linux.ibm.com>
+Tested-by: Shalini Chellathurai Saroja <shalini@linux.ibm.com>
+Acked-by: Pierre Morel <pmorel@linux.ibm.com>
+Signed-off-by: Niklas Schnelle <schnelle@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/pci/pci_event.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/s390/pci/pci_event.c
++++ b/arch/s390/pci/pci_event.c
+@@ -92,6 +92,9 @@ static void __zpci_event_availability(st
+ ret = clp_add_pci_device(ccdf->fid, ccdf->fh, 1);
+ break;
+ }
++ /* the configuration request may be stale */
++ if (zdev->state != ZPCI_FN_STATE_STANDBY)
++ break;
+ zdev->fh = ccdf->fh;
+ zdev->state = ZPCI_FN_STATE_CONFIGURED;
+ ret = zpci_enable_device(zdev);
--- /dev/null
+From 2f0230b2f2d5fd287a85583eefb5aed35b6fe510 Mon Sep 17 00:00:00 2001
+From: Niklas Schnelle <schnelle@linux.ibm.com>
+Date: Mon, 3 Aug 2020 17:46:32 +0200
+Subject: s390/pci: re-introduce zpci_remove_device()
+
+From: Niklas Schnelle <schnelle@linux.ibm.com>
+
+commit 2f0230b2f2d5fd287a85583eefb5aed35b6fe510 upstream.
+
+For fixing the PF to VF link removal we need to perform some action on
+every removal of a zdev from the common PCI subsystem.
+So in preparation re-introduce zpci_remove_device() and use that instead
+of directly calling the common code functions. This was actually still
+declared from earlier code but no longer implemented.
+
+Reviewed-by: Pierre Morel <pmorel@linux.ibm.com>
+Signed-off-by: Niklas Schnelle <schnelle@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/pci/pci.c | 19 ++++++++++++-------
+ arch/s390/pci/pci_event.c | 4 ++--
+ drivers/pci/hotplug/s390_pci_hpc.c | 12 +++++-------
+ 3 files changed, 19 insertions(+), 16 deletions(-)
+
+--- a/arch/s390/pci/pci.c
++++ b/arch/s390/pci/pci.c
+@@ -672,6 +672,16 @@ int zpci_disable_device(struct zpci_dev
+ }
+ EXPORT_SYMBOL_GPL(zpci_disable_device);
+
++void zpci_remove_device(struct zpci_dev *zdev)
++{
++ struct zpci_bus *zbus = zdev->zbus;
++ struct pci_dev *pdev;
++
++ pdev = pci_get_slot(zbus->bus, zdev->devfn);
++ if (pdev)
++ pci_stop_and_remove_bus_device_locked(pdev);
++}
++
+ int zpci_create_device(struct zpci_dev *zdev)
+ {
+ int rc;
+@@ -716,13 +726,8 @@ void zpci_release_device(struct kref *kr
+ {
+ struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
+
+- if (zdev->zbus->bus) {
+- struct pci_dev *pdev;
+-
+- pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
+- if (pdev)
+- pci_stop_and_remove_bus_device_locked(pdev);
+- }
++ if (zdev->zbus->bus)
++ zpci_remove_device(zdev);
+
+ switch (zdev->state) {
+ case ZPCI_FN_STATE_ONLINE:
+--- a/arch/s390/pci/pci_event.c
++++ b/arch/s390/pci/pci_event.c
+@@ -118,7 +118,7 @@ static void __zpci_event_availability(st
+ if (!zdev)
+ break;
+ if (pdev)
+- pci_stop_and_remove_bus_device_locked(pdev);
++ zpci_remove_device(zdev);
+
+ ret = zpci_disable_device(zdev);
+ if (ret)
+@@ -137,7 +137,7 @@ static void __zpci_event_availability(st
+ /* Give the driver a hint that the function is
+ * already unusable. */
+ pdev->error_state = pci_channel_io_perm_failure;
+- pci_stop_and_remove_bus_device_locked(pdev);
++ zpci_remove_device(zdev);
+ }
+
+ zdev->state = ZPCI_FN_STATE_STANDBY;
+--- a/drivers/pci/hotplug/s390_pci_hpc.c
++++ b/drivers/pci/hotplug/s390_pci_hpc.c
+@@ -83,21 +83,19 @@ static int disable_slot(struct hotplug_s
+ struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
+ hotplug_slot);
+ struct pci_dev *pdev;
+- struct zpci_bus *zbus = zdev->zbus;
+ int rc;
+
+ if (!zpci_fn_configured(zdev->state))
+ return -EIO;
+
+- pdev = pci_get_slot(zbus->bus, zdev->devfn);
+- if (pdev) {
+- if (pci_num_vf(pdev))
+- return -EBUSY;
+-
+- pci_stop_and_remove_bus_device_locked(pdev);
++ pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
++ if (pdev && pci_num_vf(pdev)) {
+ pci_dev_put(pdev);
++ return -EBUSY;
+ }
+
++ zpci_remove_device(zdev);
++
+ rc = zpci_disable_device(zdev);
+ if (rc)
+ return rc;
--- /dev/null
+From 2d9a2c5f581be3991ba67fa9e7497c711220ea8e Mon Sep 17 00:00:00 2001
+From: Steffen Maier <maier@linux.ibm.com>
+Date: Thu, 13 Aug 2020 17:28:56 +0200
+Subject: scsi: zfcp: Fix use-after-free in request timeout handlers
+
+From: Steffen Maier <maier@linux.ibm.com>
+
+commit 2d9a2c5f581be3991ba67fa9e7497c711220ea8e upstream.
+
+Before v4.15 commit 75492a51568b ("s390/scsi: Convert timers to use
+timer_setup()"), we intentionally only passed zfcp_adapter as context
+argument to zfcp_fsf_request_timeout_handler(). Since we only trigger
+adapter recovery, it was unnecessary to sync against races between timeout
+and (late) completion. Likewise, we only passed zfcp_erp_action as context
+argument to zfcp_erp_timeout_handler(). Since we only wakeup an ERP action,
+it was unnecessary to sync against races between timeout and (late)
+completion.
+
+Meanwhile the timeout handlers get timer_list as context argument and do a
+timer-specific container-of to zfcp_fsf_req which can have been freed.
+
+Fix it by making sure that any request timeout handlers, that might just
+have started before del_timer(), are completed by using del_timer_sync()
+instead. This ensures the request free happens afterwards.
+
+Space time diagram of potential use-after-free:
+
+Basic idea is to have 2 or more pending requests whose timeouts run out at
+almost the same time.
+
+req 1 timeout ERP thread req 2 timeout
+---------------- ---------------- ---------------------------------------
+zfcp_fsf_request_timeout_handler
+fsf_req = from_timer(fsf_req, t, timer)
+adapter = fsf_req->adapter
+zfcp_qdio_siosl(adapter)
+zfcp_erp_adapter_reopen(adapter,...)
+ zfcp_erp_strategy
+ ...
+ zfcp_fsf_req_dismiss_all
+ list_for_each_entry_safe
+ zfcp_fsf_req_complete 1
+ del_timer 1
+ zfcp_fsf_req_free 1
+ zfcp_fsf_req_complete 2
+ zfcp_fsf_request_timeout_handler
+ del_timer 2
+ fsf_req = from_timer(fsf_req, t, timer)
+ zfcp_fsf_req_free 2
+ adapter = fsf_req->adapter
+ ^^^^^^^ already freed
+
+Link: https://lore.kernel.org/r/20200813152856.50088-1-maier@linux.ibm.com
+Fixes: 75492a51568b ("s390/scsi: Convert timers to use timer_setup()")
+Cc: <stable@vger.kernel.org> #4.15+
+Suggested-by: Julian Wiedmann <jwi@linux.ibm.com>
+Reviewed-by: Julian Wiedmann <jwi@linux.ibm.com>
+Signed-off-by: Steffen Maier <maier@linux.ibm.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/s390/scsi/zfcp_fsf.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/s390/scsi/zfcp_fsf.c
++++ b/drivers/s390/scsi/zfcp_fsf.c
+@@ -434,7 +434,7 @@ static void zfcp_fsf_req_complete(struct
+ return;
+ }
+
+- del_timer(&req->timer);
++ del_timer_sync(&req->timer);
+ zfcp_fsf_protstatus_eval(req);
+ zfcp_fsf_fsfstatus_eval(req);
+ req->handler(req);
+@@ -867,7 +867,7 @@ static int zfcp_fsf_req_send(struct zfcp
+ req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
+ req->issued = get_tod_clock();
+ if (zfcp_qdio_send(qdio, &req->qdio_req)) {
+- del_timer(&req->timer);
++ del_timer_sync(&req->timer);
+ /* lookup request again, list might have changed */
+ zfcp_reqlist_find_rm(adapter->req_list, req_id);
+ zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
--- /dev/null
+From 98b0bf02738004829d7e26d6cb47b2e469aaba86 Mon Sep 17 00:00:00 2001
+From: Yang Weijiang <weijiang.yang@intel.com>
+Date: Fri, 14 Aug 2020 21:21:05 +0800
+Subject: selftests: kvm: Use a shorter encoding to clear RAX
+
+From: Yang Weijiang <weijiang.yang@intel.com>
+
+commit 98b0bf02738004829d7e26d6cb47b2e469aaba86 upstream.
+
+If debug_regs.c is built with newer binutils, the resulting binary is "optimized"
+by the assembler:
+
+asm volatile("ss_start: "
+ "xor %%rax,%%rax\n\t"
+ "cpuid\n\t"
+ "movl $0x1a0,%%ecx\n\t"
+ "rdmsr\n\t"
+ : : : "rax", "ecx");
+
+is translated to :
+
+ 000000000040194e <ss_start>:
+ 40194e: 31 c0 xor %eax,%eax <----- rax->eax?
+ 401950: 0f a2 cpuid
+ 401952: b9 a0 01 00 00 mov $0x1a0,%ecx
+ 401957: 0f 32 rdmsr
+
+As you can see rax is replaced with eax in target binary code.
+This causes a difference in the length of the xor instruction (2 bytes vs 3 bytes),
+and makes the hard-coded instruction length check fail:
+
+ /* Instruction lengths starting at ss_start */
+ int ss_size[4] = {
+ 3, /* xor */ <-------- 2 or 3?
+ 2, /* cpuid */
+ 5, /* mov */
+ 2, /* rdmsr */
+ };
+
+Encode the shorter version directly and, while at it, fix the "clobbers"
+of the asm.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/testing/selftests/kvm/x86_64/debug_regs.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/tools/testing/selftests/kvm/x86_64/debug_regs.c
++++ b/tools/testing/selftests/kvm/x86_64/debug_regs.c
+@@ -40,11 +40,11 @@ static void guest_code(void)
+
+ /* Single step test, covers 2 basic instructions and 2 emulated */
+ asm volatile("ss_start: "
+- "xor %%rax,%%rax\n\t"
++ "xor %%eax,%%eax\n\t"
+ "cpuid\n\t"
+ "movl $0x1a0,%%ecx\n\t"
+ "rdmsr\n\t"
+- : : : "rax", "ecx");
++ : : : "eax", "ebx", "ecx", "edx");
+
+ /* DR6.BD test */
+ asm volatile("bd_start: mov %%dr0, %%rax" : : : "rax");
alsa-hda-avoid-reset-of-sdo_limit.patch
alsa-hda-realtek-add-quirk-for-samsung-galaxy-flex-book.patch
alsa-hda-realtek-add-quirk-for-samsung-galaxy-book-ion.patch
+can-j1939-transport-j1939_session_tx_dat-fix-use-after-free-read-in-j1939_tp_txtimer.patch
+can-j1939-socket-j1939_sk_bind-make-sure-ml_priv-is-allocated.patch
+spi-prevent-adding-devices-below-an-unregistering-controller.patch
+io_uring-find-and-cancel-head-link-async-work-on-files-exit.patch
+mm-vunmap-add-cond_resched-in-vunmap_pmd_range.patch
+romfs-fix-uninitialized-memory-leak-in-romfs_dev_read.patch
+kernel-relay.c-fix-memleak-on-destroy-relay-channel.patch
+uprobes-__replace_page-avoid-bug-in-munlock_vma_page.patch
+squashfs-avoid-bio_alloc-failure-with-1mbyte-blocks.patch
+mm-include-cma-pages-in-lowmem_reserve-at-boot.patch
+mm-page_alloc-fix-core-hung-in-free_pcppages_bulk.patch
+asoc-amd-renoir-restore-two-more-registers-during-resume.patch
+rdma-hfi1-correct-an-interlock-issue-for-tid-rdma-write-request.patch
+opp-enable-resources-again-if-they-were-disabled-earlier.patch
+opp-put-opp-table-in-dev_pm_opp_set_rate-for-empty-tables.patch
+opp-put-opp-table-in-dev_pm_opp_set_rate-if-_set_opp_bw-fails.patch
+ext4-do-not-block-rwf_nowait-dio-write-on-unallocated-space.patch
+ext4-fix-checking-of-directory-entry-validity-for-inline-directories.patch
+jbd2-add-the-missing-unlock_buffer-in-the-error-path-of-jbd2_write_superblock.patch
+scsi-zfcp-fix-use-after-free-in-request-timeout-handlers.patch
+kvm-pass-mmu-notifier-range-flags-to-kvm_unmap_hva_range.patch
+selftests-kvm-use-a-shorter-encoding-to-clear-rax.patch
+s390-pci-fix-zpci_bus_link_virtfn.patch
+s390-pci-re-introduce-zpci_remove_device.patch
+s390-pci-fix-pf-vf-linking-on-hot-plug.patch
+s390-pci-ignore-stale-configuration-request-event.patch
+mm-memory.c-skip-spurious-tlb-flush-for-retried-page-fault.patch
+drm-amdgpu-use-the-correct-size-when-allocating-memory.patch
+drm-amdgpu-display-use-gfp_atomic-in-dcn20_validate_bandwidth_internal.patch
+drm-amd-display-fix-incorrect-backlight-register-offset-for-dcn.patch
+drm-amd-display-fix-edid-parsing-after-resume-from-suspend.patch
+drm-amd-display-blank-stream-before-destroying-hdcp-session.patch
+drm-amd-display-fix-dfpstate-hang-due-to-view-port-changed.patch
+drm-amd-display-fix-pow-crashing-when-given-base-0.patch
--- /dev/null
+From ddf75be47ca748f8b12d28ac64d624354fddf189 Mon Sep 17 00:00:00 2001
+From: Lukas Wunner <lukas@wunner.de>
+Date: Mon, 3 Aug 2020 13:09:01 +0200
+Subject: spi: Prevent adding devices below an unregistering controller
+
+From: Lukas Wunner <lukas@wunner.de>
+
+commit ddf75be47ca748f8b12d28ac64d624354fddf189 upstream.
+
+CONFIG_OF_DYNAMIC and CONFIG_ACPI allow adding SPI devices at runtime
+using a DeviceTree overlay or DSDT patch. CONFIG_SPI_SLAVE allows the
+same via sysfs.
+
+But there are no precautions to prevent adding a device below a
+controller that's being removed. Such a device is unusable and may not
+even be able to unbind cleanly as it becomes inaccessible once the
+controller has been torn down. E.g. it is then impossible to quiesce
+the device's interrupt.
+
+of_spi_notify() and acpi_spi_notify() do hold a ref on the controller,
+but otherwise run lockless against spi_unregister_controller().
+
+Fix by holding the spi_add_lock in spi_unregister_controller() and
+bailing out of spi_add_device() if the controller has been unregistered
+concurrently.
+
+Fixes: ce79d54ae447 ("spi/of: Add OF notifier handler")
+Signed-off-by: Lukas Wunner <lukas@wunner.de>
+Cc: stable@vger.kernel.org # v3.19+
+Cc: Geert Uytterhoeven <geert+renesas@glider.be>
+Cc: Octavian Purdila <octavian.purdila@intel.com>
+Cc: Pantelis Antoniou <pantelis.antoniou@konsulko.com>
+Link: https://lore.kernel.org/r/a8c3205088a969dc8410eec1eba9aface60f36af.1596451035.git.lukas@wunner.de
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/spi/Kconfig | 3 +++
+ drivers/spi/spi.c | 21 ++++++++++++++++++++-
+ 2 files changed, 23 insertions(+), 1 deletion(-)
+
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -999,4 +999,7 @@ config SPI_SLAVE_SYSTEM_CONTROL
+
+ endif # SPI_SLAVE
+
++config SPI_DYNAMIC
++ def_bool ACPI || OF_DYNAMIC || SPI_SLAVE
++
+ endif # SPI
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -475,6 +475,12 @@ static LIST_HEAD(spi_controller_list);
+ */
+ static DEFINE_MUTEX(board_lock);
+
++/*
++ * Prevents addition of devices with same chip select and
++ * addition of devices below an unregistering controller.
++ */
++static DEFINE_MUTEX(spi_add_lock);
++
+ /**
+ * spi_alloc_device - Allocate a new SPI device
+ * @ctlr: Controller to which device is connected
+@@ -554,7 +560,6 @@ static int spi_dev_check(struct device *
+ */
+ int spi_add_device(struct spi_device *spi)
+ {
+- static DEFINE_MUTEX(spi_add_lock);
+ struct spi_controller *ctlr = spi->controller;
+ struct device *dev = ctlr->dev.parent;
+ int status;
+@@ -582,6 +587,13 @@ int spi_add_device(struct spi_device *sp
+ goto done;
+ }
+
++ /* Controller may unregister concurrently */
++ if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
++ !device_is_registered(&ctlr->dev)) {
++ status = -ENODEV;
++ goto done;
++ }
++
+ /* Descriptors take precedence */
+ if (ctlr->cs_gpiods)
+ spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
+@@ -2764,6 +2776,10 @@ void spi_unregister_controller(struct sp
+ struct spi_controller *found;
+ int id = ctlr->bus_num;
+
++ /* Prevent addition of new devices, unregister existing ones */
++ if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
++ mutex_lock(&spi_add_lock);
++
+ device_for_each_child(&ctlr->dev, NULL, __unregister);
+
+ /* First make sure that this controller was ever added */
+@@ -2784,6 +2800,9 @@ void spi_unregister_controller(struct sp
+ if (found == ctlr)
+ idr_remove(&spi_master_idr, id);
+ mutex_unlock(&board_lock);
++
++ if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
++ mutex_unlock(&spi_add_lock);
+ }
+ EXPORT_SYMBOL_GPL(spi_unregister_controller);
+
--- /dev/null
+From f26044c83e6e473a61917f5db411d1417327d425 Mon Sep 17 00:00:00 2001
+From: Phillip Lougher <phillip@squashfs.org.uk>
+Date: Thu, 20 Aug 2020 17:42:21 -0700
+Subject: squashfs: avoid bio_alloc() failure with 1Mbyte blocks
+
+From: Phillip Lougher <phillip@squashfs.org.uk>
+
+commit f26044c83e6e473a61917f5db411d1417327d425 upstream.
+
+This is a regression introduced by the patch "migrate from ll_rw_block
+usage to BIO".
+
+Bio_alloc() is limited to 256 pages (1 Mbyte). This can cause a failure
+when reading 1 Mbyte block filesystems. The problem is a datablock can be
+fully (or almost fully) uncompressed, requiring 256 pages, but, because blocks
+are not aligned to page boundaries, it may require 257 pages to read.
+
+Bio_kmalloc() can handle 1024 pages, and so use this for the edge
+condition.
+
+Fixes: 93e72b3c612a ("squashfs: migrate from ll_rw_block usage to BIO")
+Reported-by: Nicolas Prochazka <nicolas.prochazka@gmail.com>
+Reported-by: Tomoatsu Shimada <shimada@walbrix.com>
+Signed-off-by: Phillip Lougher <phillip@squashfs.org.uk>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Guenter Roeck <groeck@chromium.org>
+Cc: Philippe Liard <pliard@google.com>
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: Adrien Schildknecht <adrien+dev@schischi.me>
+Cc: Daniel Rosenberg <drosen@google.com>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/20200815035637.15319-1-phillip@squashfs.org.uk
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/squashfs/block.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/fs/squashfs/block.c
++++ b/fs/squashfs/block.c
+@@ -87,7 +87,11 @@ static int squashfs_bio_read(struct supe
+ int error, i;
+ struct bio *bio;
+
+- bio = bio_alloc(GFP_NOIO, page_count);
++ if (page_count <= BIO_MAX_PAGES)
++ bio = bio_alloc(GFP_NOIO, page_count);
++ else
++ bio = bio_kmalloc(GFP_NOIO, page_count);
++
+ if (!bio)
+ return -ENOMEM;
+
--- /dev/null
+From c17c3dc9d08b9aad9a55a1e53f205187972f448e Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Thu, 20 Aug 2020 17:42:17 -0700
+Subject: uprobes: __replace_page() avoid BUG in munlock_vma_page()
+
+From: Hugh Dickins <hughd@google.com>
+
+commit c17c3dc9d08b9aad9a55a1e53f205187972f448e upstream.
+
+syzbot crashed on the VM_BUG_ON_PAGE(PageTail) in munlock_vma_page(), when
+called from uprobes __replace_page(). Which of many ways to fix it?
+Settled on not calling when PageCompound (since Head and Tail are equals
+in this context, PageCompound the usual check in uprobes.c, and the prior
+use of FOLL_SPLIT_PMD will have cleared PageMlocked already).
+
+Fixes: 5a52c9df62b4 ("uprobe: use FOLL_SPLIT_PMD instead of FOLL_SPLIT")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Acked-by: Song Liu <songliubraving@fb.com>
+Acked-by: Oleg Nesterov <oleg@redhat.com>
+Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Cc: <stable@vger.kernel.org> [5.4+]
+Link: http://lkml.kernel.org/r/alpine.LSU.2.11.2008161338360.20413@eggly.anvils
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/events/uprobes.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/events/uprobes.c
++++ b/kernel/events/uprobes.c
+@@ -205,7 +205,7 @@ static int __replace_page(struct vm_area
+ try_to_free_swap(old_page);
+ page_vma_mapped_walk_done(&pvmw);
+
+- if (vma->vm_flags & VM_LOCKED)
++ if ((vma->vm_flags & VM_LOCKED) && !PageCompound(old_page))
+ munlock_vma_page(old_page);
+ put_page(old_page);
+