From: Sasha Levin Date: Sun, 30 Jan 2022 15:27:59 +0000 (-0500) Subject: Fixes for 5.15 X-Git-Tag: v5.4.176~20^2~6 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=bed775a262a9171bdce6c449176b16c9255afcc4;p=thirdparty%2Fkernel%2Fstable-queue.git Fixes for 5.15 Signed-off-by: Sasha Levin --- diff --git a/queue-5.15/can-tcan4x5x-regmap-fix-max-register-value.patch b/queue-5.15/can-tcan4x5x-regmap-fix-max-register-value.patch new file mode 100644 index 00000000000..a8a02ab517c --- /dev/null +++ b/queue-5.15/can-tcan4x5x-regmap-fix-max-register-value.patch @@ -0,0 +1,40 @@ +From 9c430af3d3261dee088860a4782f24acfc64b041 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 14 Jan 2022 18:50:54 +0100 +Subject: can: tcan4x5x: regmap: fix max register value + +From: Marc Kleine-Budde + +[ Upstream commit e59986de5ff701494e14c722b78b6e6d513e0ab5 ] + +The MRAM of the tcan4x5x has a size of 2K and starts at 0x8000. There +are no further registers in the tcan4x5x making 0x87fc the biggest +addressable register. + +This patch fixes the max register value of the regmap config from +0x8ffc to 0x87fc. + +Fixes: 6e1caaf8ed22 ("can: tcan4x5x: fix max register value") +Link: https://lore.kernel.org/all/20220119064011.2943292-1-mkl@pengutronix.de +Signed-off-by: Marc Kleine-Budde +Signed-off-by: Sasha Levin +--- + drivers/net/can/m_can/tcan4x5x-regmap.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/can/m_can/tcan4x5x-regmap.c b/drivers/net/can/m_can/tcan4x5x-regmap.c +index ca80dbaf7a3f5..26e212b8ca7a6 100644 +--- a/drivers/net/can/m_can/tcan4x5x-regmap.c ++++ b/drivers/net/can/m_can/tcan4x5x-regmap.c +@@ -12,7 +12,7 @@ + #define TCAN4X5X_SPI_INSTRUCTION_WRITE (0x61 << 24) + #define TCAN4X5X_SPI_INSTRUCTION_READ (0x41 << 24) + +-#define TCAN4X5X_MAX_REGISTER 0x8ffc ++#define TCAN4X5X_MAX_REGISTER 0x87fc + + static int tcan4x5x_regmap_gather_write(void *context, + const void *reg, size_t reg_len, +-- +2.34.1 + diff --git a/queue-5.15/ceph-put-the-requests-sessions-when-it-fails-to-allo.patch b/queue-5.15/ceph-put-the-requests-sessions-when-it-fails-to-allo.patch new file mode 100644 index 00000000000..3fb93370fc2 --- /dev/null +++ b/queue-5.15/ceph-put-the-requests-sessions-when-it-fails-to-allo.patch @@ -0,0 +1,150 @@ +From 3f1171ae855be7d1c7eb71d56a24357a50f98e05 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 12 Jan 2022 12:29:04 +0800 +Subject: ceph: put the requests/sessions when it fails to alloc memory + +From: Xiubo Li + +[ Upstream commit 89d43d0551a848e70e63d9ba11534aaeabc82443 ] + +When failing to allocate the sessions memory we should make sure +the req1 and req2 and the sessions get put. And also in case the +max_sessions decreased so when kreallocate the new memory some +sessions maybe missed being put. + +And if the max_sessions is 0 krealloc will return ZERO_SIZE_PTR, +which will lead to a distinct access fault. 
+ +URL: https://tracker.ceph.com/issues/53819 +Fixes: e1a4541ec0b9 ("ceph: flush the mdlog before waiting on unsafe reqs") +Signed-off-by: Xiubo Li +Reviewed-by: Venky Shankar +Reviewed-by: Jeff Layton +Signed-off-by: Ilya Dryomov +Signed-off-by: Sasha Levin +--- + fs/ceph/caps.c | 55 +++++++++++++++++++++++++++++++++----------------- + 1 file changed, 37 insertions(+), 18 deletions(-) + +diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c +index 8be4da2e2b826..09900a9015ea6 100644 +--- a/fs/ceph/caps.c ++++ b/fs/ceph/caps.c +@@ -2217,6 +2217,7 @@ static int unsafe_request_wait(struct inode *inode) + struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; + struct ceph_inode_info *ci = ceph_inode(inode); + struct ceph_mds_request *req1 = NULL, *req2 = NULL; ++ unsigned int max_sessions; + int ret, err = 0; + + spin_lock(&ci->i_unsafe_lock); +@@ -2234,37 +2235,45 @@ static int unsafe_request_wait(struct inode *inode) + } + spin_unlock(&ci->i_unsafe_lock); + ++ /* ++ * The mdsc->max_sessions is unlikely to be changed ++ * mostly, here we will retry it by reallocating the ++ * sessions array memory to get rid of the mdsc->mutex ++ * lock. ++ */ ++retry: ++ max_sessions = mdsc->max_sessions; ++ + /* + * Trigger to flush the journal logs in all the relevant MDSes + * manually, or in the worst case we must wait at most 5 seconds + * to wait the journal logs to be flushed by the MDSes periodically. + */ +- if (req1 || req2) { ++ if ((req1 || req2) && likely(max_sessions)) { + struct ceph_mds_session **sessions = NULL; + struct ceph_mds_session *s; + struct ceph_mds_request *req; +- unsigned int max; + int i; + +- /* +- * The mdsc->max_sessions is unlikely to be changed +- * mostly, here we will retry it by reallocating the +- * sessions arrary memory to get rid of the mdsc->mutex +- * lock. 
+- */ +-retry: +- max = mdsc->max_sessions; +- sessions = krealloc(sessions, max * sizeof(s), __GFP_ZERO); +- if (!sessions) +- return -ENOMEM; ++ sessions = kzalloc(max_sessions * sizeof(s), GFP_KERNEL); ++ if (!sessions) { ++ err = -ENOMEM; ++ goto out; ++ } + + spin_lock(&ci->i_unsafe_lock); + if (req1) { + list_for_each_entry(req, &ci->i_unsafe_dirops, + r_unsafe_dir_item) { + s = req->r_session; +- if (unlikely(s->s_mds >= max)) { ++ if (unlikely(s->s_mds >= max_sessions)) { + spin_unlock(&ci->i_unsafe_lock); ++ for (i = 0; i < max_sessions; i++) { ++ s = sessions[i]; ++ if (s) ++ ceph_put_mds_session(s); ++ } ++ kfree(sessions); + goto retry; + } + if (!sessions[s->s_mds]) { +@@ -2277,8 +2286,14 @@ retry: + list_for_each_entry(req, &ci->i_unsafe_iops, + r_unsafe_target_item) { + s = req->r_session; +- if (unlikely(s->s_mds >= max)) { ++ if (unlikely(s->s_mds >= max_sessions)) { + spin_unlock(&ci->i_unsafe_lock); ++ for (i = 0; i < max_sessions; i++) { ++ s = sessions[i]; ++ if (s) ++ ceph_put_mds_session(s); ++ } ++ kfree(sessions); + goto retry; + } + if (!sessions[s->s_mds]) { +@@ -2299,7 +2314,7 @@ retry: + spin_unlock(&ci->i_ceph_lock); + + /* send flush mdlog request to MDSes */ +- for (i = 0; i < max; i++) { ++ for (i = 0; i < max_sessions; i++) { + s = sessions[i]; + if (s) { + send_flush_mdlog(s); +@@ -2316,15 +2331,19 @@ retry: + ceph_timeout_jiffies(req1->r_timeout)); + if (ret) + err = -EIO; +- ceph_mdsc_put_request(req1); + } + if (req2) { + ret = !wait_for_completion_timeout(&req2->r_safe_completion, + ceph_timeout_jiffies(req2->r_timeout)); + if (ret) + err = -EIO; +- ceph_mdsc_put_request(req2); + } ++ ++out: ++ if (req1) ++ ceph_mdsc_put_request(req1); ++ if (req2) ++ ceph_mdsc_put_request(req2); + return err; + } + +-- +2.34.1 + diff --git a/queue-5.15/drivers-hv-balloon-account-for-vmbus-packet-header-i.patch b/queue-5.15/drivers-hv-balloon-account-for-vmbus-packet-header-i.patch new file mode 100644 index 00000000000..116874c13bf --- /dev/null +++ b/queue-5.15/drivers-hv-balloon-account-for-vmbus-packet-header-i.patch @@ -0,0 +1,58 @@ +From 56ec76687951bbb599a81dc12723b28f3966b72c Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 20 Jan 2022 04:20:52 +0800 +Subject: Drivers: hv: balloon: account for vmbus packet header in max_pkt_size + +From: Yanming Liu + +[ Upstream commit 96d9d1fa5cd505078534113308ced0aa56d8da58 ] + +Commit adae1e931acd ("Drivers: hv: vmbus: Copy packets sent by Hyper-V +out of the ring buffer") introduced a notion of maximum packet size in +vmbus channel and used that size to initialize a buffer holding all +incoming packet along with their vmbus packet header. hv_balloon uses +the default maximum packet size VMBUS_DEFAULT_MAX_PKT_SIZE which matches +its maximum message size, however vmbus_open expects this size to also +include vmbus packet header. This leads to 4096 bytes +dm_unballoon_request messages being truncated to 4080 bytes. When the +driver tries to read next packet it starts from a wrong read_index, +receives garbage and prints a lot of "Unhandled message: type: +" in dmesg. + +Allocate the buffer with HV_HYP_PAGE_SIZE more bytes to make room for +the header. 
+ +Fixes: adae1e931acd ("Drivers: hv: vmbus: Copy packets sent by Hyper-V out of the ring buffer") +Suggested-by: Michael Kelley (LINUX) +Suggested-by: Andrea Parri (Microsoft) +Signed-off-by: Yanming Liu +Reviewed-by: Michael Kelley +Reviewed-by: Andrea Parri (Microsoft) +Link: https://lore.kernel.org/r/20220119202052.3006981-1-yanminglr@gmail.com +Signed-off-by: Wei Liu +Signed-off-by: Sasha Levin +--- + drivers/hv/hv_balloon.c | 7 +++++++ + 1 file changed, 7 insertions(+) + +diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c +index ca873a3b98dbe..f2d05bff42453 100644 +--- a/drivers/hv/hv_balloon.c ++++ b/drivers/hv/hv_balloon.c +@@ -1660,6 +1660,13 @@ static int balloon_connect_vsp(struct hv_device *dev) + unsigned long t; + int ret; + ++ /* ++ * max_pkt_size should be large enough for one vmbus packet header plus ++ * our receive buffer size. Hyper-V sends messages up to ++ * HV_HYP_PAGE_SIZE bytes long on balloon channel. ++ */ ++ dev->channel->max_pkt_size = HV_HYP_PAGE_SIZE * 2; ++ + ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0, + balloon_onchannelcallback, dev); + if (ret) +-- +2.34.1 + diff --git a/queue-5.15/drm-msm-a6xx-add-missing-suspend_count-increment.patch b/queue-5.15/drm-msm-a6xx-add-missing-suspend_count-increment.patch new file mode 100644 index 00000000000..ea8ecebcb8b --- /dev/null +++ b/queue-5.15/drm-msm-a6xx-add-missing-suspend_count-increment.patch @@ -0,0 +1,36 @@ +From a63e11fbe14264a579d98f7bd2e5735eb3d7f38f Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 13 Jan 2022 08:32:13 -0800 +Subject: drm/msm/a6xx: Add missing suspend_count increment + +From: Rob Clark + +[ Upstream commit 860a7b2a87b7c743154824d0597b6c3eb3b53154 ] + +Reported-by: Danylo Piliaiev +Fixes: 3ab1c5cc3939 ("drm/msm: Add param for userspace to query suspend count") +Signed-off-by: Rob Clark +Reviewed-by: Dmitry Baryshkov +Link: https://lore.kernel.org/r/20220113163215.215367-1-robdclark@gmail.com +Signed-off-by: Dmitry Baryshkov +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +index 723074aae5b63..b681c45520bbd 100644 +--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c ++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +@@ -1557,6 +1557,8 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu) + for (i = 0; i < gpu->nr_rings; i++) + a6xx_gpu->shadow[i] = 0; + ++ gpu->suspend_count++; ++ + return 0; + } + +-- +2.34.1 + diff --git a/queue-5.15/drm-msm-dpu-invalid-parameter-check-in-dpu_setup_dsp.patch b/queue-5.15/drm-msm-dpu-invalid-parameter-check-in-dpu_setup_dsp.patch new file mode 100644 index 00000000000..e17aef51a2c --- /dev/null +++ b/queue-5.15/drm-msm-dpu-invalid-parameter-check-in-dpu_setup_dsp.patch @@ -0,0 +1,54 @@ +From 845360584f051ee39eb8a65b0fbd5a9fa32781d1 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 9 Jan 2022 20:24:31 +0100 +Subject: drm/msm/dpu: invalid parameter check in dpu_setup_dspp_pcc +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: José Expósito + +[ Upstream commit 170b22234d5495f5e0844246e23f004639ee89ba ] + +The function performs a check on the "ctx" input parameter, however, it +is used before the check. + +Initialize the "base" variable after the sanity check to avoid a +possible NULL pointer dereference. 
+ +Fixes: 4259ff7ae509e ("drm/msm/dpu: add support for pcc color block in dpu driver") +Addresses-Coverity-ID: 1493866 ("Null pointer dereference") +Signed-off-by: José Expósito +Link: https://lore.kernel.org/r/20220109192431.135949-1-jose.exposito89@gmail.com +Signed-off-by: Dmitry Baryshkov +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c | 11 +++++++++-- + 1 file changed, 9 insertions(+), 2 deletions(-) + +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c +index a98e964c3b6fa..355894a3b48c3 100644 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c +@@ -26,9 +26,16 @@ static void dpu_setup_dspp_pcc(struct dpu_hw_dspp *ctx, + struct dpu_hw_pcc_cfg *cfg) + { + +- u32 base = ctx->cap->sblk->pcc.base; ++ u32 base; + +- if (!ctx || !base) { ++ if (!ctx) { ++ DRM_ERROR("invalid ctx %pK\n", ctx); ++ return; ++ } ++ ++ base = ctx->cap->sblk->pcc.base; ++ ++ if (!base) { + DRM_ERROR("invalid ctx %pK pcc base 0x%x\n", ctx, base); + return; + } +-- +2.34.1 + diff --git a/queue-5.15/drm-msm-dsi-fix-missing-put_device-call-in-dsi_get_p.patch b/queue-5.15/drm-msm-dsi-fix-missing-put_device-call-in-dsi_get_p.patch new file mode 100644 index 00000000000..c636da6a87e --- /dev/null +++ b/queue-5.15/drm-msm-dsi-fix-missing-put_device-call-in-dsi_get_p.patch @@ -0,0 +1,44 @@ +From 8ffa265476da4417744f1d97c3821bbb2f9fbde8 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 30 Dec 2021 07:09:40 +0000 +Subject: drm/msm/dsi: Fix missing put_device() call in dsi_get_phy + +From: Miaoqian Lin + +[ Upstream commit c04c3148ca12227d92f91b355b4538cc333c9922 ] + +If of_find_device_by_node() succeeds, dsi_get_phy() doesn't +a corresponding put_device(). Thus add put_device() to fix the exception +handling. 
+ +Fixes: ec31abf ("drm/msm/dsi: Separate PHY to another platform device") +Signed-off-by: Miaoqian Lin +Reviewed-by: Dmitry Baryshkov +Link: https://lore.kernel.org/r/20211230070943.18116-1-linmq006@gmail.com +Signed-off-by: Dmitry Baryshkov +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/msm/dsi/dsi.c | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c +index fc280cc434943..122fadcf7cc1e 100644 +--- a/drivers/gpu/drm/msm/dsi/dsi.c ++++ b/drivers/gpu/drm/msm/dsi/dsi.c +@@ -40,7 +40,12 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi) + + of_node_put(phy_node); + +- if (!phy_pdev || !msm_dsi->phy) { ++ if (!phy_pdev) { ++ DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__); ++ return -EPROBE_DEFER; ++ } ++ if (!msm_dsi->phy) { ++ put_device(&phy_pdev->dev); + DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__); + return -EPROBE_DEFER; + } +-- +2.34.1 + diff --git a/queue-5.15/drm-msm-dsi-invalid-parameter-check-in-msm_dsi_phy_e.patch b/queue-5.15/drm-msm-dsi-invalid-parameter-check-in-msm_dsi_phy_e.patch new file mode 100644 index 00000000000..5f600deb905 --- /dev/null +++ b/queue-5.15/drm-msm-dsi-invalid-parameter-check-in-msm_dsi_phy_e.patch @@ -0,0 +1,52 @@ +From 6405b8c7791a24a820fddecaa579f3bb88ecfb85 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 16 Jan 2022 19:18:44 +0100 +Subject: drm/msm/dsi: invalid parameter check in msm_dsi_phy_enable +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: José Expósito + +[ Upstream commit 5e761a2287234bc402ba7ef07129f5103bcd775c ] + +The function performs a check on the "phy" input parameter, however, it +is used before the check. + +Initialize the "dev" variable after the sanity check to avoid a possible +NULL pointer dereference. 
+ +Fixes: 5c8290284402b ("drm/msm/dsi: Split PHY drivers to separate files") +Addresses-Coverity-ID: 1493860 ("Null pointer dereference") +Signed-off-by: José Expósito +Reviewed-by: Dmitry Baryshkov +Link: https://lore.kernel.org/r/20220116181844.7400-1-jose.exposito89@gmail.com +Signed-off-by: Dmitry Baryshkov +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +index 8c65ef6968caf..a878b8b079c64 100644 +--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c ++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +@@ -806,12 +806,14 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, + struct msm_dsi_phy_clk_request *clk_req, + struct msm_dsi_phy_shared_timings *shared_timings) + { +- struct device *dev = &phy->pdev->dev; ++ struct device *dev; + int ret; + + if (!phy || !phy->cfg->ops.enable) + return -EINVAL; + ++ dev = &phy->pdev->dev; ++ + ret = dsi_phy_enable_resource(phy); + if (ret) { + DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n", +-- +2.34.1 + diff --git a/queue-5.15/drm-msm-fix-wrong-size-calculation.patch b/queue-5.15/drm-msm-fix-wrong-size-calculation.patch new file mode 100644 index 00000000000..d8333956c87 --- /dev/null +++ b/queue-5.15/drm-msm-fix-wrong-size-calculation.patch @@ -0,0 +1,46 @@ +From 96587afe86b709b7a1e0917945fa2115d9b2ca25 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 12 Jan 2022 20:33:34 +0800 +Subject: drm/msm: Fix wrong size calculation + +From: Xianting Tian + +[ Upstream commit 0a727b459ee39bd4c5ced19d6024258ac87b6b2e ] + +For example, memory-region in .dts as below, + reg = <0x0 0x50000000 0x0 0x20000000> + +We can get below values, +struct resource r; +r.start = 0x50000000; +r.end = 0x6fffffff; + +So the size should be: +size = r.end - r.start + 1 = 0x20000000 + +Signed-off-by: Xianting Tian +Fixes: 072f1f9168ed ("drm/msm: add support for "stolen" mem") +Reviewed-by: Dmitry Baryshkov +Link: https://lore.kernel.org/r/20220112123334.749776-1-xianting.tian@linux.alibaba.com +Signed-off-by: Dmitry Baryshkov +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/msm/msm_drv.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c +index 27f737a253c77..bbf999c665174 100644 +--- a/drivers/gpu/drm/msm/msm_drv.c ++++ b/drivers/gpu/drm/msm/msm_drv.c +@@ -437,7 +437,7 @@ static int msm_init_vram(struct drm_device *dev) + of_node_put(node); + if (ret) + return ret; +- size = r.end - r.start; ++ size = r.end - r.start + 1; + DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start); + + /* if we have no IOMMU, then we need to use carveout allocator. +-- +2.34.1 + diff --git a/queue-5.15/drm-msm-hdmi-fix-missing-put_device-call-in-msm_hdmi.patch b/queue-5.15/drm-msm-hdmi-fix-missing-put_device-call-in-msm_hdmi.patch new file mode 100644 index 00000000000..e1fc36cc732 --- /dev/null +++ b/queue-5.15/drm-msm-hdmi-fix-missing-put_device-call-in-msm_hdmi.patch @@ -0,0 +1,47 @@ +From 3cb4b51abcace798d97246f5209cf895a18c73ab Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 7 Jan 2022 08:50:22 +0000 +Subject: drm/msm/hdmi: Fix missing put_device() call in msm_hdmi_get_phy + +From: Miaoqian Lin + +[ Upstream commit 774fe0cd838d1b1419d41ab4ea0613c80d4ecbd7 ] + +The reference taken by 'of_find_device_by_node()' must be released when +not needed anymore. +Add the corresponding 'put_device()' in the error handling path. 
+ +Fixes: e00012b256d4 ("drm/msm/hdmi: Make HDMI core get its PHY") +Signed-off-by: Miaoqian Lin +Reviewed-by: Dmitry Baryshkov +Link: https://lore.kernel.org/r/20220107085026.23831-1-linmq006@gmail.com +Signed-off-by: Dmitry Baryshkov +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/msm/hdmi/hdmi.c | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c +index 737453b6e5966..94f948ef279d1 100644 +--- a/drivers/gpu/drm/msm/hdmi/hdmi.c ++++ b/drivers/gpu/drm/msm/hdmi/hdmi.c +@@ -97,10 +97,15 @@ static int msm_hdmi_get_phy(struct hdmi *hdmi) + + of_node_put(phy_node); + +- if (!phy_pdev || !hdmi->phy) { ++ if (!phy_pdev) { + DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n"); + return -EPROBE_DEFER; + } ++ if (!hdmi->phy) { ++ DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n"); ++ put_device(&phy_pdev->dev); ++ return -EPROBE_DEFER; ++ } + + hdmi->phy_dev = get_device(&phy_pdev->dev); + +-- +2.34.1 + diff --git a/queue-5.15/gve-fix-gfp-flags-when-allocing-pages.patch b/queue-5.15/gve-fix-gfp-flags-when-allocing-pages.patch new file mode 100644 index 00000000000..44f49480de2 --- /dev/null +++ b/queue-5.15/gve-fix-gfp-flags-when-allocing-pages.patch @@ -0,0 +1,97 @@ +From 126c2edafb74391f5b1522a2b92f27441565bed1 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 25 Jan 2022 16:38:43 -0800 +Subject: gve: Fix GFP flags when allocing pages + +From: Catherine Sullivan + +[ Upstream commit a92f7a6feeb3884c69c1c7c1f13bccecb2228ad0 ] + +Use GFP_ATOMIC when allocating pages out of the hotpath, +continue to use GFP_KERNEL when allocating pages during setup. + +GFP_KERNEL will allow blocking which allows it to succeed +more often in a low memory enviornment but in the hotpath we do +not want to allow the allocation to block. 
+ +Fixes: f5cedc84a30d2 ("gve: Add transmit and receive support") +Signed-off-by: Catherine Sullivan +Signed-off-by: David Awogbemila +Link: https://lore.kernel.org/r/20220126003843.3584521-1-awogbemila@google.com +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/google/gve/gve.h | 2 +- + drivers/net/ethernet/google/gve/gve_main.c | 6 +++--- + drivers/net/ethernet/google/gve/gve_rx.c | 3 ++- + drivers/net/ethernet/google/gve/gve_rx_dqo.c | 2 +- + 4 files changed, 7 insertions(+), 6 deletions(-) + +diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h +index c1d4042671f9f..b1273dce4795b 100644 +--- a/drivers/net/ethernet/google/gve/gve.h ++++ b/drivers/net/ethernet/google/gve/gve.h +@@ -815,7 +815,7 @@ static inline bool gve_is_gqi(struct gve_priv *priv) + /* buffers */ + int gve_alloc_page(struct gve_priv *priv, struct device *dev, + struct page **page, dma_addr_t *dma, +- enum dma_data_direction); ++ enum dma_data_direction, gfp_t gfp_flags); + void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma, + enum dma_data_direction); + /* tx handling */ +diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c +index 959352fceead7..68552848d3888 100644 +--- a/drivers/net/ethernet/google/gve/gve_main.c ++++ b/drivers/net/ethernet/google/gve/gve_main.c +@@ -746,9 +746,9 @@ static void gve_free_rings(struct gve_priv *priv) + + int gve_alloc_page(struct gve_priv *priv, struct device *dev, + struct page **page, dma_addr_t *dma, +- enum dma_data_direction dir) ++ enum dma_data_direction dir, gfp_t gfp_flags) + { +- *page = alloc_page(GFP_KERNEL); ++ *page = alloc_page(gfp_flags); + if (!*page) { + priv->page_alloc_fail++; + return -ENOMEM; +@@ -792,7 +792,7 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id, + for (i = 0; i < pages; i++) { + err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i], + &qpl->page_buses[i], +- gve_qpl_dma_dir(priv, id)); ++ gve_qpl_dma_dir(priv, id), GFP_KERNEL); + /* caller handles clean up */ + if (err) + return -ENOMEM; +diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c +index 16169f291ad9f..629d8ed08fc61 100644 +--- a/drivers/net/ethernet/google/gve/gve_rx.c ++++ b/drivers/net/ethernet/google/gve/gve_rx.c +@@ -79,7 +79,8 @@ static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev, + dma_addr_t dma; + int err; + +- err = gve_alloc_page(priv, dev, &page, &dma, DMA_FROM_DEVICE); ++ err = gve_alloc_page(priv, dev, &page, &dma, DMA_FROM_DEVICE, ++ GFP_ATOMIC); + if (err) + return err; + +diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c +index 8500621b2cd41..7b18b4fd9e548 100644 +--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c ++++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c +@@ -157,7 +157,7 @@ static int gve_alloc_page_dqo(struct gve_priv *priv, + int err; + + err = gve_alloc_page(priv, &priv->pdev->dev, &buf_state->page_info.page, +- &buf_state->addr, DMA_FROM_DEVICE); ++ &buf_state->addr, DMA_FROM_DEVICE, GFP_KERNEL); + if (err) + return err; + +-- +2.34.1 + diff --git a/queue-5.15/hwmon-adt7470-prevent-divide-by-zero-in-adt7470_fan_.patch b/queue-5.15/hwmon-adt7470-prevent-divide-by-zero-in-adt7470_fan_.patch new file mode 100644 index 00000000000..04b40ff7bfd --- /dev/null +++ b/queue-5.15/hwmon-adt7470-prevent-divide-by-zero-in-adt7470_fan_.patch @@ -0,0 +1,39 @@ +From 
716e3bfd2b82b230ad41b0b01382bc8029eae918 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 21 Jan 2022 14:55:43 +0300 +Subject: hwmon: (adt7470) Prevent divide by zero in adt7470_fan_write() + +From: Dan Carpenter + +[ Upstream commit c1ec0cabc36718efc7fe8b4157d41b82d08ec1d2 ] + +The "val" variable is controlled by the user and comes from +hwmon_attr_store(). The FAN_RPM_TO_PERIOD() macro divides by "val" +so a zero will crash the system. Check for that and return -EINVAL. +Negatives are also invalid so return -EINVAL for those too. + +Fixes: fc958a61ff6d ("hwmon: (adt7470) Convert to devm_hwmon_device_register_with_info API") +Signed-off-by: Dan Carpenter +Signed-off-by: Guenter Roeck +Signed-off-by: Sasha Levin +--- + drivers/hwmon/adt7470.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c +index d519aca4a9d64..fb6d14d213a18 100644 +--- a/drivers/hwmon/adt7470.c ++++ b/drivers/hwmon/adt7470.c +@@ -662,6 +662,9 @@ static int adt7470_fan_write(struct device *dev, u32 attr, int channel, long val + struct adt7470_data *data = dev_get_drvdata(dev); + int err; + ++ if (val <= 0) ++ return -EINVAL; ++ + val = FAN_RPM_TO_PERIOD(val); + val = clamp_val(val, 1, 65534); + +-- +2.34.1 + diff --git a/queue-5.15/hwmon-lm90-fix-sysfs-and-udev-notifications.patch b/queue-5.15/hwmon-lm90-fix-sysfs-and-udev-notifications.patch new file mode 100644 index 00000000000..00c0502b567 --- /dev/null +++ b/queue-5.15/hwmon-lm90-fix-sysfs-and-udev-notifications.patch @@ -0,0 +1,56 @@ +From 4154910da7afd6992937d876aea66809093bfb43 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 10 Jan 2022 23:23:31 -0800 +Subject: hwmon: (lm90) Fix sysfs and udev notifications + +From: Guenter Roeck + +[ Upstream commit d379880d9adb9f1ada3f1266aa49ea2561328e08 ] + +sysfs and udev notifications need to be sent to the _alarm +attributes, not to the value attributes. 
+ +Fixes: 94dbd23ed88c ("hwmon: (lm90) Use hwmon_notify_event()") +Cc: Dmitry Osipenko +Signed-off-by: Guenter Roeck +Signed-off-by: Sasha Levin +--- + drivers/hwmon/lm90.c | 12 ++++++------ + 1 file changed, 6 insertions(+), 6 deletions(-) + +diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c +index ba01127c1deb1..1c9493c708132 100644 +--- a/drivers/hwmon/lm90.c ++++ b/drivers/hwmon/lm90.c +@@ -1808,22 +1808,22 @@ static bool lm90_is_tripped(struct i2c_client *client, u16 *status) + + if (st & LM90_STATUS_LLOW) + hwmon_notify_event(data->hwmon_dev, hwmon_temp, +- hwmon_temp_min, 0); ++ hwmon_temp_min_alarm, 0); + if (st & LM90_STATUS_RLOW) + hwmon_notify_event(data->hwmon_dev, hwmon_temp, +- hwmon_temp_min, 1); ++ hwmon_temp_min_alarm, 1); + if (st2 & MAX6696_STATUS2_R2LOW) + hwmon_notify_event(data->hwmon_dev, hwmon_temp, +- hwmon_temp_min, 2); ++ hwmon_temp_min_alarm, 2); + if (st & LM90_STATUS_LHIGH) + hwmon_notify_event(data->hwmon_dev, hwmon_temp, +- hwmon_temp_max, 0); ++ hwmon_temp_max_alarm, 0); + if (st & LM90_STATUS_RHIGH) + hwmon_notify_event(data->hwmon_dev, hwmon_temp, +- hwmon_temp_max, 1); ++ hwmon_temp_max_alarm, 1); + if (st2 & MAX6696_STATUS2_R2HIGH) + hwmon_notify_event(data->hwmon_dev, hwmon_temp, +- hwmon_temp_max, 2); ++ hwmon_temp_max_alarm, 2); + + return true; + } +-- +2.34.1 + diff --git a/queue-5.15/hwmon-lm90-mark-alert-as-broken-for-max6646-6647-664.patch b/queue-5.15/hwmon-lm90-mark-alert-as-broken-for-max6646-6647-664.patch new file mode 100644 index 00000000000..30e67984340 --- /dev/null +++ b/queue-5.15/hwmon-lm90-mark-alert-as-broken-for-max6646-6647-664.patch @@ -0,0 +1,36 @@ +From 3f28c93d8d83322cec04a0062bc1fd2db2eeb7f6 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 7 Jan 2022 12:36:41 -0800 +Subject: hwmon: (lm90) Mark alert as broken for MAX6646/6647/6649 + +From: Guenter Roeck + +[ Upstream commit f614629f9c1080dcc844a8430e3fb4c37ebbf05d ] + +Experiments with MAX6646 and MAX6648 show that the alert function of those +chips is broken, similar to other chips supported by the lm90 driver. +Mark it accordingly. + +Fixes: 4667bcb8d8fc ("hwmon: (lm90) Introduce chip parameter structure") +Signed-off-by: Guenter Roeck +Signed-off-by: Sasha Levin +--- + drivers/hwmon/lm90.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c +index 06cb971c889b4..ba01127c1deb1 100644 +--- a/drivers/hwmon/lm90.c ++++ b/drivers/hwmon/lm90.c +@@ -394,7 +394,7 @@ static const struct lm90_params lm90_params[] = { + .max_convrate = 9, + }, + [max6646] = { +- .flags = LM90_HAVE_CRIT, ++ .flags = LM90_HAVE_CRIT | LM90_HAVE_BROKEN_ALERT, + .alert_alarms = 0x7c, + .max_convrate = 6, + .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL, +-- +2.34.1 + diff --git a/queue-5.15/hwmon-lm90-mark-alert-as-broken-for-max6654.patch b/queue-5.15/hwmon-lm90-mark-alert-as-broken-for-max6654.patch new file mode 100644 index 00000000000..fd59967fbda --- /dev/null +++ b/queue-5.15/hwmon-lm90-mark-alert-as-broken-for-max6654.patch @@ -0,0 +1,35 @@ +From 885ec32fb491a1ff6d2cc43463064a2d75b9fb62 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 7 Jan 2022 11:05:23 -0800 +Subject: hwmon: (lm90) Mark alert as broken for MAX6654 + +From: Guenter Roeck + +[ Upstream commit a53fff96f35763d132a36c620b183fdf11022d7a ] + +Experiments with MAX6654 show that its alert function is broken, +similar to other chips supported by the lm90 driver. Mark it accordingly. 
+ +Fixes: 229d495d8189 ("hwmon: (lm90) Add max6654 support to lm90 driver") +Cc: Josh Lehan +Signed-off-by: Guenter Roeck +Signed-off-by: Sasha Levin +--- + drivers/hwmon/lm90.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c +index e4ecf3440d7cf..280ae5f58187b 100644 +--- a/drivers/hwmon/lm90.c ++++ b/drivers/hwmon/lm90.c +@@ -400,6 +400,7 @@ static const struct lm90_params lm90_params[] = { + .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL, + }, + [max6654] = { ++ .flags = LM90_HAVE_BROKEN_ALERT, + .alert_alarms = 0x7c, + .max_convrate = 7, + .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL, +-- +2.34.1 + diff --git a/queue-5.15/hwmon-lm90-mark-alert-as-broken-for-max6680.patch b/queue-5.15/hwmon-lm90-mark-alert-as-broken-for-max6680.patch new file mode 100644 index 00000000000..ec07c76c64c --- /dev/null +++ b/queue-5.15/hwmon-lm90-mark-alert-as-broken-for-max6680.patch @@ -0,0 +1,36 @@ +From 9fabd7a44cfcfca9b0ad7188a43bb484d43b0ef3 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 7 Jan 2022 11:11:00 -0800 +Subject: hwmon: (lm90) Mark alert as broken for MAX6680 + +From: Guenter Roeck + +[ Upstream commit 94746b0ba479743355e0d3cc1cb9cfe3011fb8be ] + +Experiments with MAX6680 and MAX6681 show that the alert function of those +chips is broken, similar to other chips supported by the lm90 driver. +Mark it accordingly. + +Fixes: 4667bcb8d8fc ("hwmon: (lm90) Introduce chip parameter structure") +Signed-off-by: Guenter Roeck +Signed-off-by: Sasha Levin +--- + drivers/hwmon/lm90.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c +index 280ae5f58187b..06cb971c889b4 100644 +--- a/drivers/hwmon/lm90.c ++++ b/drivers/hwmon/lm90.c +@@ -419,7 +419,7 @@ static const struct lm90_params lm90_params[] = { + }, + [max6680] = { + .flags = LM90_HAVE_OFFSET | LM90_HAVE_CRIT +- | LM90_HAVE_CRIT_ALRM_SWP, ++ | LM90_HAVE_CRIT_ALRM_SWP | LM90_HAVE_BROKEN_ALERT, + .alert_alarms = 0x7c, + .max_convrate = 7, + }, +-- +2.34.1 + diff --git a/queue-5.15/hwmon-lm90-re-enable-interrupts-after-alert-clears.patch b/queue-5.15/hwmon-lm90-re-enable-interrupts-after-alert-clears.patch new file mode 100644 index 00000000000..de6037287e5 --- /dev/null +++ b/queue-5.15/hwmon-lm90-re-enable-interrupts-after-alert-clears.patch @@ -0,0 +1,41 @@ +From cb85f102cc7652a3c55bf93ccdf4e258e1cf0290 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sat, 8 Jan 2022 11:37:19 -0800 +Subject: hwmon: (lm90) Re-enable interrupts after alert clears + +From: Guenter Roeck + +[ Upstream commit bc341a1a98827925082e95db174734fc8bd68af6 ] + +If alert handling is broken, interrupts are disabled after an alert and +re-enabled after the alert clears. However, if there is an interrupt +handler, this does not apply if alerts were originally disabled and enabled +when the driver was loaded. In that case, interrupts will stay disabled +after an alert was handled though the alert handler even after the alert +condition clears. Address the situation by always re-enabling interrupts +after the alert condition clears if there is an interrupt handler. 
+ +Fixes: 2abdc357c55d9 ("hwmon: (lm90) Unmask hardware interrupt") +Cc: Dmitry Osipenko +Signed-off-by: Guenter Roeck +Signed-off-by: Sasha Levin +--- + drivers/hwmon/lm90.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c +index cc5e48fe304b1..e4ecf3440d7cf 100644 +--- a/drivers/hwmon/lm90.c ++++ b/drivers/hwmon/lm90.c +@@ -848,7 +848,7 @@ static int lm90_update_device(struct device *dev) + * Re-enable ALERT# output if it was originally enabled and + * relevant alarms are all clear + */ +- if (!(data->config_orig & 0x80) && ++ if ((client->irq || !(data->config_orig & 0x80)) && + !(data->alarms & data->alert_alarms)) { + if (data->config & 0x80) { + dev_dbg(&client->dev, "Re-enabling ALERT#\n"); +-- +2.34.1 + diff --git a/queue-5.15/hwmon-lm90-reduce-maximum-conversion-rate-for-g781.patch b/queue-5.15/hwmon-lm90-reduce-maximum-conversion-rate-for-g781.patch new file mode 100644 index 00000000000..5b5b1863e2c --- /dev/null +++ b/queue-5.15/hwmon-lm90-reduce-maximum-conversion-rate-for-g781.patch @@ -0,0 +1,44 @@ +From 14ed037fb63f8d9992e58f55b8c9dfad50834a15 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 6 Jan 2022 11:48:52 -0800 +Subject: hwmon: (lm90) Reduce maximum conversion rate for G781 + +From: Guenter Roeck + +[ Upstream commit a66c5ed539277b9f2363bbace0dba88b85b36c26 ] + +According to its datasheet, G781 supports a maximum conversion rate value +of 8 (62.5 ms). However, chips labeled G781 and G780 were found to only +support a maximum conversion rate value of 7 (125 ms). On the other side, +chips labeled G781-1 and G784 were found to support a conversion rate value +of 8. There is no known means to distinguish G780 from G781 or G784; all +chips report the same manufacturer ID and chip revision. +Setting the conversion rate register value to 8 on chips not supporting +it causes unexpected behavior since the real conversion rate is set to 0 +(16 seconds) if a value of 8 is written into the conversion rate register. +Limit the conversion rate register value to 7 for all G78x chips to avoid +the problem. 
+ +Fixes: ae544f64cc7b ("hwmon: (lm90) Add support for GMT G781") +Signed-off-by: Guenter Roeck +Signed-off-by: Sasha Levin +--- + drivers/hwmon/lm90.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c +index 74019dff2550e..cc5e48fe304b1 100644 +--- a/drivers/hwmon/lm90.c ++++ b/drivers/hwmon/lm90.c +@@ -373,7 +373,7 @@ static const struct lm90_params lm90_params[] = { + .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT + | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_CRIT, + .alert_alarms = 0x7c, +- .max_convrate = 8, ++ .max_convrate = 7, + }, + [lm86] = { + .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT +-- +2.34.1 + diff --git a/queue-5.15/ibmvnic-allow-extra-failures-before-disabling.patch b/queue-5.15/ibmvnic-allow-extra-failures-before-disabling.patch new file mode 100644 index 00000000000..c3b9a1b4ad3 --- /dev/null +++ b/queue-5.15/ibmvnic-allow-extra-failures-before-disabling.patch @@ -0,0 +1,77 @@ +From 54aa01b027d3e0dbafb8a58afee09c182ac29160 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 21 Jan 2022 18:59:18 -0800 +Subject: ibmvnic: Allow extra failures before disabling + +From: Sukadev Bhattiprolu + +[ Upstream commit db9f0e8bf79e6da7068b5818fea0ffd9d0d4b4da ] + +If auto-priority-failover (APF) is enabled and there are at least two +backing devices of different priorities, some resets like fail-over, +change-param etc can cause at least two back to back failovers. (Failover +from high priority backing device to lower priority one and then back +to the higher priority one if that is still functional). + +Depending on the timimg of the two failovers it is possible to trigger +a "hard" reset and for the hard reset to fail due to failovers. When this +occurs, the driver assumes that the network is unstable and disables the +VNIC for a 60-second "settling time". This in turn can cause the ethtool +command to fail with "No such device" while the vnic automatically recovers +a little while later. + +Given that it's possible to have two back to back failures, allow for extra +failures before disabling the vnic for the settling time. + +Fixes: f15fde9d47b8 ("ibmvnic: delay next reset if hard reset fails") +Signed-off-by: Sukadev Bhattiprolu +Reviewed-by: Dany Madden +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/ibm/ibmvnic.c | 21 +++++++++++++++++---- + 1 file changed, 17 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c +index 352ffe982d849..191b3b7350182 100644 +--- a/drivers/net/ethernet/ibm/ibmvnic.c ++++ b/drivers/net/ethernet/ibm/ibmvnic.c +@@ -2424,6 +2424,7 @@ static void __ibmvnic_reset(struct work_struct *work) + struct ibmvnic_rwi *rwi; + unsigned long flags; + u32 reset_state; ++ int num_fails = 0; + int rc = 0; + + adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset); +@@ -2477,11 +2478,23 @@ static void __ibmvnic_reset(struct work_struct *work) + rc = do_hard_reset(adapter, rwi, reset_state); + rtnl_unlock(); + } +- if (rc) { +- /* give backing device time to settle down */ ++ if (rc) ++ num_fails++; ++ else ++ num_fails = 0; ++ ++ /* If auto-priority-failover is enabled we can get ++ * back to back failovers during resets, resulting ++ * in at least two failed resets (from high-priority ++ * backing device to low-priority one and then back) ++ * If resets continue to fail beyond that, give the ++ * adapter some time to settle down before retrying. 
++ */ ++ if (num_fails >= 3) { + netdev_dbg(adapter->netdev, +- "[S:%s] Hard reset failed, waiting 60 secs\n", +- adapter_state_to_string(adapter->state)); ++ "[S:%s] Hard reset failed %d times, waiting 60 secs\n", ++ adapter_state_to_string(adapter->state), ++ num_fails); + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(60 * HZ); + } +-- +2.34.1 + diff --git a/queue-5.15/ibmvnic-don-t-spin-in-tasklet.patch b/queue-5.15/ibmvnic-don-t-spin-in-tasklet.patch new file mode 100644 index 00000000000..40c14cdc5a0 --- /dev/null +++ b/queue-5.15/ibmvnic-don-t-spin-in-tasklet.patch @@ -0,0 +1,47 @@ +From b48ccad2d45647331592da788b1f80cee096fb1e Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 21 Jan 2022 18:59:20 -0800 +Subject: ibmvnic: don't spin in tasklet + +From: Sukadev Bhattiprolu + +[ Upstream commit 48079e7fdd0269d66b1d7d66ae88bd03162464ad ] + +ibmvnic_tasklet() continuously spins waiting for responses to all +capability requests. It does this to avoid encountering an error +during initialization of the vnic. However if there is a bug in the +VIOS and we do not receive a response to one or more queries the +tasklet ends up spinning continuously leading to hard lock ups. + +If we fail to receive a message from the VIOS it is reasonable to +timeout the login attempt rather than spin indefinitely in the tasklet. + +Fixes: 249168ad07cd ("ibmvnic: Make CRQ interrupt tasklet wait for all capabilities crqs") +Signed-off-by: Sukadev Bhattiprolu +Reviewed-by: Dany Madden +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/ibm/ibmvnic.c | 6 ------ + 1 file changed, 6 deletions(-) + +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c +index a3dd5c648fecd..5c7371dc83848 100644 +--- a/drivers/net/ethernet/ibm/ibmvnic.c ++++ b/drivers/net/ethernet/ibm/ibmvnic.c +@@ -5317,12 +5317,6 @@ static void ibmvnic_tasklet(struct tasklet_struct *t) + ibmvnic_handle_crq(crq, adapter); + crq->generic.first = 0; + } +- +- /* remain in tasklet until all +- * capabilities responses are received +- */ +- if (!adapter->wait_capability) +- done = true; + } + /* if capabilities CRQ's were sent in this tasklet, the following + * tasklet must wait until all responses are received +-- +2.34.1 + diff --git a/queue-5.15/ibmvnic-init-running_cap_crqs-early.patch b/queue-5.15/ibmvnic-init-running_cap_crqs-early.patch new file mode 100644 index 00000000000..99353099e88 --- /dev/null +++ b/queue-5.15/ibmvnic-init-running_cap_crqs-early.patch @@ -0,0 +1,327 @@ +From 7f4f1cc4184e6bd1ae66ddc9b5809390aae7fe6f Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 21 Jan 2022 18:59:19 -0800 +Subject: ibmvnic: init ->running_cap_crqs early + +From: Sukadev Bhattiprolu + +[ Upstream commit 151b6a5c06b678687f64f2d9a99fd04d5cd32b72 ] + +We use ->running_cap_crqs to determine when the ibmvnic_tasklet() should +send out the next protocol message type. i.e when we get back responses +to all our QUERY_CAPABILITY CRQs we send out REQUEST_CAPABILITY crqs. +Similiary, when we get responses to all the REQUEST_CAPABILITY crqs, we +send out the QUERY_IP_OFFLOAD CRQ. + +We currently increment ->running_cap_crqs as we send out each CRQ and +have the ibmvnic_tasklet() send out the next message type, when this +running_cap_crqs count drops to 0. + +This assumes that all the CRQs of the current type were sent out before +the count drops to 0. 
However it is possible that we send out say 6 CRQs, +get preempted and receive all the 6 responses before we send out the +remaining CRQs. This can result in ->running_cap_crqs count dropping to +zero before all messages of the current type were sent and we end up +sending the next protocol message too early. + +Instead initialize the ->running_cap_crqs upfront so the tasklet will +only send the next protocol message after all responses are received. + +Use the cap_reqs local variable to also detect any discrepancy (either +now or in future) in the number of capability requests we actually send. + +Currently only send_query_cap() is affected by this behavior (of sending +next message early) since it is called from the worker thread (during +reset) and from application thread (during ->ndo_open()) and they can be +preempted. send_request_cap() is only called from the tasklet which +processes CRQ responses sequentially, is not be affected. But to +maintain the existing symmtery with send_query_capability() we update +send_request_capability() also. + +Fixes: 249168ad07cd ("ibmvnic: Make CRQ interrupt tasklet wait for all capabilities crqs") +Signed-off-by: Sukadev Bhattiprolu +Reviewed-by: Dany Madden +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/ibm/ibmvnic.c | 106 +++++++++++++++++++---------- + 1 file changed, 71 insertions(+), 35 deletions(-) + +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c +index 191b3b7350182..a3dd5c648fecd 100644 +--- a/drivers/net/ethernet/ibm/ibmvnic.c ++++ b/drivers/net/ethernet/ibm/ibmvnic.c +@@ -3675,11 +3675,25 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry) + struct device *dev = &adapter->vdev->dev; + union ibmvnic_crq crq; + int max_entries; ++ int cap_reqs; ++ ++ /* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on ++ * the PROMISC flag). Initialize this count upfront. When the tasklet ++ * receives a response to all of these, it will send the next protocol ++ * message (QUERY_IP_OFFLOAD). 
++ */ ++ if (!(adapter->netdev->flags & IFF_PROMISC) || ++ adapter->promisc_supported) ++ cap_reqs = 7; ++ else ++ cap_reqs = 6; + + if (!retry) { + /* Sub-CRQ entries are 32 byte long */ + int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4); + ++ atomic_set(&adapter->running_cap_crqs, cap_reqs); ++ + if (adapter->min_tx_entries_per_subcrq > entries_page || + adapter->min_rx_add_entries_per_subcrq > entries_page) { + dev_err(dev, "Fatal, invalid entries per sub-crq\n"); +@@ -3740,44 +3754,45 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry) + adapter->opt_rx_comp_queues; + + adapter->req_rx_add_queues = adapter->max_rx_add_queues; ++ } else { ++ atomic_add(cap_reqs, &adapter->running_cap_crqs); + } +- + memset(&crq, 0, sizeof(crq)); + crq.request_capability.first = IBMVNIC_CRQ_CMD; + crq.request_capability.cmd = REQUEST_CAPABILITY; + + crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES); + crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues); +- atomic_inc(&adapter->running_cap_crqs); ++ cap_reqs--; + ibmvnic_send_crq(adapter, &crq); + + crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES); + crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues); +- atomic_inc(&adapter->running_cap_crqs); ++ cap_reqs--; + ibmvnic_send_crq(adapter, &crq); + + crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES); + crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues); +- atomic_inc(&adapter->running_cap_crqs); ++ cap_reqs--; + ibmvnic_send_crq(adapter, &crq); + + crq.request_capability.capability = + cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ); + crq.request_capability.number = + cpu_to_be64(adapter->req_tx_entries_per_subcrq); +- atomic_inc(&adapter->running_cap_crqs); ++ cap_reqs--; + ibmvnic_send_crq(adapter, &crq); + + crq.request_capability.capability = + cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ); + crq.request_capability.number = + cpu_to_be64(adapter->req_rx_add_entries_per_subcrq); +- atomic_inc(&adapter->running_cap_crqs); ++ cap_reqs--; + ibmvnic_send_crq(adapter, &crq); + + crq.request_capability.capability = cpu_to_be16(REQ_MTU); + crq.request_capability.number = cpu_to_be64(adapter->req_mtu); +- atomic_inc(&adapter->running_cap_crqs); ++ cap_reqs--; + ibmvnic_send_crq(adapter, &crq); + + if (adapter->netdev->flags & IFF_PROMISC) { +@@ -3785,16 +3800,21 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry) + crq.request_capability.capability = + cpu_to_be16(PROMISC_REQUESTED); + crq.request_capability.number = cpu_to_be64(1); +- atomic_inc(&adapter->running_cap_crqs); ++ cap_reqs--; + ibmvnic_send_crq(adapter, &crq); + } + } else { + crq.request_capability.capability = + cpu_to_be16(PROMISC_REQUESTED); + crq.request_capability.number = cpu_to_be64(0); +- atomic_inc(&adapter->running_cap_crqs); ++ cap_reqs--; + ibmvnic_send_crq(adapter, &crq); + } ++ ++ /* Keep at end to catch any discrepancy between expected and actual ++ * CRQs sent. ++ */ ++ WARN_ON(cap_reqs != 0); + } + + static int pending_scrq(struct ibmvnic_adapter *adapter, +@@ -4188,118 +4208,132 @@ static void send_query_map(struct ibmvnic_adapter *adapter) + static void send_query_cap(struct ibmvnic_adapter *adapter) + { + union ibmvnic_crq crq; ++ int cap_reqs; ++ ++ /* We send out 25 QUERY_CAPABILITY CRQs below. Initialize this count ++ * upfront. When the tasklet receives a response to all of these, it ++ * can send out the next protocol messaage (REQUEST_CAPABILITY). 
++ */ ++ cap_reqs = 25; ++ ++ atomic_set(&adapter->running_cap_crqs, cap_reqs); + +- atomic_set(&adapter->running_cap_crqs, 0); + memset(&crq, 0, sizeof(crq)); + crq.query_capability.first = IBMVNIC_CRQ_CMD; + crq.query_capability.cmd = QUERY_CAPABILITY; + + crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES); +- atomic_inc(&adapter->running_cap_crqs); + ibmvnic_send_crq(adapter, &crq); ++ cap_reqs--; + + crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES); +- atomic_inc(&adapter->running_cap_crqs); + ibmvnic_send_crq(adapter, &crq); ++ cap_reqs--; + + crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES); +- atomic_inc(&adapter->running_cap_crqs); + ibmvnic_send_crq(adapter, &crq); ++ cap_reqs--; + + crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES); +- atomic_inc(&adapter->running_cap_crqs); + ibmvnic_send_crq(adapter, &crq); ++ cap_reqs--; + + crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES); +- atomic_inc(&adapter->running_cap_crqs); + ibmvnic_send_crq(adapter, &crq); ++ cap_reqs--; + + crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES); +- atomic_inc(&adapter->running_cap_crqs); + ibmvnic_send_crq(adapter, &crq); ++ cap_reqs--; + + crq.query_capability.capability = + cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ); +- atomic_inc(&adapter->running_cap_crqs); + ibmvnic_send_crq(adapter, &crq); ++ cap_reqs--; + + crq.query_capability.capability = + cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ); +- atomic_inc(&adapter->running_cap_crqs); + ibmvnic_send_crq(adapter, &crq); ++ cap_reqs--; + + crq.query_capability.capability = + cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ); +- atomic_inc(&adapter->running_cap_crqs); + ibmvnic_send_crq(adapter, &crq); ++ cap_reqs--; + + crq.query_capability.capability = + cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ); +- atomic_inc(&adapter->running_cap_crqs); + ibmvnic_send_crq(adapter, &crq); ++ cap_reqs--; + + crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD); +- atomic_inc(&adapter->running_cap_crqs); + ibmvnic_send_crq(adapter, &crq); ++ cap_reqs--; + + crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED); +- atomic_inc(&adapter->running_cap_crqs); + ibmvnic_send_crq(adapter, &crq); ++ cap_reqs--; + + crq.query_capability.capability = cpu_to_be16(MIN_MTU); +- atomic_inc(&adapter->running_cap_crqs); + ibmvnic_send_crq(adapter, &crq); ++ cap_reqs--; + + crq.query_capability.capability = cpu_to_be16(MAX_MTU); +- atomic_inc(&adapter->running_cap_crqs); + ibmvnic_send_crq(adapter, &crq); ++ cap_reqs--; + + crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS); +- atomic_inc(&adapter->running_cap_crqs); + ibmvnic_send_crq(adapter, &crq); ++ cap_reqs--; + + crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION); +- atomic_inc(&adapter->running_cap_crqs); + ibmvnic_send_crq(adapter, &crq); ++ cap_reqs--; + + crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION); +- atomic_inc(&adapter->running_cap_crqs); + ibmvnic_send_crq(adapter, &crq); ++ cap_reqs--; + + crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES); +- atomic_inc(&adapter->running_cap_crqs); + ibmvnic_send_crq(adapter, &crq); ++ cap_reqs--; + + crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED); +- atomic_inc(&adapter->running_cap_crqs); + ibmvnic_send_crq(adapter, &crq); ++ cap_reqs--; + + crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES); +- atomic_inc(&adapter->running_cap_crqs); + ibmvnic_send_crq(adapter, &crq); ++ cap_reqs--; + + 
crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES); +- atomic_inc(&adapter->running_cap_crqs); + ibmvnic_send_crq(adapter, &crq); ++ cap_reqs--; + + crq.query_capability.capability = + cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q); +- atomic_inc(&adapter->running_cap_crqs); + ibmvnic_send_crq(adapter, &crq); ++ cap_reqs--; + + crq.query_capability.capability = + cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ); +- atomic_inc(&adapter->running_cap_crqs); + ibmvnic_send_crq(adapter, &crq); ++ cap_reqs--; + + crq.query_capability.capability = + cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ); +- atomic_inc(&adapter->running_cap_crqs); + ibmvnic_send_crq(adapter, &crq); ++ cap_reqs--; + + crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ); +- atomic_inc(&adapter->running_cap_crqs); ++ + ibmvnic_send_crq(adapter, &crq); ++ cap_reqs--; ++ ++ /* Keep at end to catch any discrepancy between expected and actual ++ * CRQs sent. ++ */ ++ WARN_ON(cap_reqs != 0); + } + + static void send_query_ip_offload(struct ibmvnic_adapter *adapter) +@@ -4604,6 +4638,8 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq, + char *name; + + atomic_dec(&adapter->running_cap_crqs); ++ netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n", ++ atomic_read(&adapter->running_cap_crqs)); + switch (be16_to_cpu(crq->request_capability_rsp.capability)) { + case REQ_TX_QUEUES: + req_value = &adapter->req_tx_queues; +-- +2.34.1 + diff --git a/queue-5.15/io_uring-fix-bug-in-slow-unregistering-of-nodes.patch b/queue-5.15/io_uring-fix-bug-in-slow-unregistering-of-nodes.patch new file mode 100644 index 00000000000..fe398549385 --- /dev/null +++ b/queue-5.15/io_uring-fix-bug-in-slow-unregistering-of-nodes.patch @@ -0,0 +1,61 @@ +From ca410092fb4f457ff5c2a6abc0fbe4d4982fee86 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 21 Jan 2022 04:38:56 -0800 +Subject: io_uring: fix bug in slow unregistering of nodes + +From: Dylan Yudaken + +[ Upstream commit b36a2050040b2d839bdc044007cdd57101d7f881 ] + +In some cases io_rsrc_ref_quiesce will call io_rsrc_node_switch_start, +and then immediately flush the delayed work queue &ctx->rsrc_put_work. + +However the percpu_ref_put does not immediately destroy the node, it +will be called asynchronously via RCU. That ends up with +io_rsrc_node_ref_zero only being called after rsrc_put_work has been +flushed, and so the process ends up sleeping for 1 second unnecessarily. + +This patch executes the put code immediately if we are busy +quiescing. 
+ +Fixes: 4a38aed2a0a7 ("io_uring: batch reap of dead file registrations") +Signed-off-by: Dylan Yudaken +Link: https://lore.kernel.org/r/20220121123856.3557884-1-dylany@fb.com +Signed-off-by: Jens Axboe +Signed-off-by: Sasha Levin +--- + fs/io_uring.c | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +diff --git a/fs/io_uring.c b/fs/io_uring.c +index f713b91537f41..993913c585fbf 100644 +--- a/fs/io_uring.c ++++ b/fs/io_uring.c +@@ -7718,10 +7718,15 @@ static void io_rsrc_node_ref_zero(struct percpu_ref *ref) + struct io_ring_ctx *ctx = node->rsrc_data->ctx; + unsigned long flags; + bool first_add = false; ++ unsigned long delay = HZ; + + spin_lock_irqsave(&ctx->rsrc_ref_lock, flags); + node->done = true; + ++ /* if we are mid-quiesce then do not delay */ ++ if (node->rsrc_data->quiesce) ++ delay = 0; ++ + while (!list_empty(&ctx->rsrc_ref_list)) { + node = list_first_entry(&ctx->rsrc_ref_list, + struct io_rsrc_node, node); +@@ -7734,7 +7739,7 @@ static void io_rsrc_node_ref_zero(struct percpu_ref *ref) + spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags); + + if (first_add) +- mod_delayed_work(system_wq, &ctx->rsrc_put_work, HZ); ++ mod_delayed_work(system_wq, &ctx->rsrc_put_work, delay); + } + + static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx) +-- +2.34.1 + diff --git a/queue-5.15/ipv4-avoid-using-shared-ip-generator-for-connected-s.patch b/queue-5.15/ipv4-avoid-using-shared-ip-generator-for-connected-s.patch new file mode 100644 index 00000000000..8628c536cc3 --- /dev/null +++ b/queue-5.15/ipv4-avoid-using-shared-ip-generator-for-connected-s.patch @@ -0,0 +1,70 @@ +From 93840336b602f916b7b287fd2e075c2f7f771d40 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 26 Jan 2022 17:10:22 -0800 +Subject: ipv4: avoid using shared IP generator for connected sockets + +From: Eric Dumazet + +[ Upstream commit 23f57406b82de51809d5812afd96f210f8b627f3 ] + +ip_select_ident_segs() has been very conservative about using +the connected socket private generator only for packets with IP_DF +set, claiming it was needed for some VJ compression implementations. + +As mentioned in this referenced document, this can be abused. +(Ref: Off-Path TCP Exploits of the Mixed IPID Assignment) + +Before switching to pure random IPID generation and possibly hurt +some workloads, lets use the private inet socket generator. + +Not only this will remove one vulnerability, this will also +improve performance of TCP flows using pmtudisc==IP_PMTUDISC_DONT + +Fixes: 73f156a6e8c1 ("inetpeer: get rid of ip_id_count") +Signed-off-by: Eric Dumazet +Reviewed-by: David Ahern +Reported-by: Ray Che +Cc: Willy Tarreau +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + include/net/ip.h | 21 ++++++++++----------- + 1 file changed, 10 insertions(+), 11 deletions(-) + +diff --git a/include/net/ip.h b/include/net/ip.h +index 9192444f2964e..0106c6590ee7b 100644 +--- a/include/net/ip.h ++++ b/include/net/ip.h +@@ -520,19 +520,18 @@ static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb, + { + struct iphdr *iph = ip_hdr(skb); + ++ /* We had many attacks based on IPID, use the private ++ * generator as much as we can. ++ */ ++ if (sk && inet_sk(sk)->inet_daddr) { ++ iph->id = htons(inet_sk(sk)->inet_id); ++ inet_sk(sk)->inet_id += segs; ++ return; ++ } + if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) { +- /* This is only to work around buggy Windows95/2000 +- * VJ compression implementations. 
If the ID field +- * does not change, they drop every other packet in +- * a TCP stream using header compression. +- */ +- if (sk && inet_sk(sk)->inet_daddr) { +- iph->id = htons(inet_sk(sk)->inet_id); +- inet_sk(sk)->inet_id += segs; +- } else { +- iph->id = 0; +- } ++ iph->id = 0; + } else { ++ /* Unfortunately we need the big hammer to get a suitable IPID */ + __ip_select_ident(net, iph, segs); + } + } +-- +2.34.1 + diff --git a/queue-5.15/ipv4-fix-ip-option-filtering-for-locally-generated-f.patch b/queue-5.15/ipv4-fix-ip-option-filtering-for-locally-generated-f.patch new file mode 100644 index 00000000000..99b5b7aa2fb --- /dev/null +++ b/queue-5.15/ipv4-fix-ip-option-filtering-for-locally-generated-f.patch @@ -0,0 +1,111 @@ +From 0d11ee7d9bfb5991c4544f4f23d9168fbb1fd419 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 21 Jan 2022 16:57:31 -0800 +Subject: ipv4: fix ip option filtering for locally generated fragments + +From: Jakub Kicinski + +[ Upstream commit 27a8caa59babb96c5890569e131bc0eb6d45daee ] + +During IP fragmentation we sanitize IP options. This means overwriting +options which should not be copied with NOPs. Only the first fragment +has the original, full options. + +ip_fraglist_prepare() copies the IP header and options from previous +fragment to the next one. Commit 19c3401a917b ("net: ipv4: place control +buffer handling away from fragmentation iterators") moved sanitizing +options before ip_fraglist_prepare() which means options are sanitized +and then overwritten again with the old values. + +Fixing this is not enough, however, nor did the sanitization work +prior to aforementioned commit. + +ip_options_fragment() (which does the sanitization) uses ipcb->opt.optlen +for the length of the options. ipcb->opt of fragments is not populated +(it's 0), only the head skb has the state properly built. So even when +called at the right time ip_options_fragment() does nothing. This seems +to date back all the way to v2.5.44 when the fast path for pre-fragmented +skbs had been introduced. Prior to that ip_options_build() would have been +called for every fragment (in fact ever since v2.5.44 the fragmentation +handing in ip_options_build() has been dead code, I'll clean it up in +-next). + +In the original patch (see Link) caixf mentions fixing the handling +for fragments other than the second one, but I'm not sure how _any_ +fragment could have had their options sanitized with the code +as it stood. 
+ +Tested with python (MTU on lo lowered to 1000 to force fragmentation): + + import socket + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + s.setsockopt(socket.IPPROTO_IP, socket.IP_OPTIONS, + bytearray([7,4,5,192, 20|0x80,4,1,0])) + s.sendto(b'1'*2000, ('127.0.0.1', 1234)) + +Before: + +IP (tos 0x0, ttl 64, id 1053, offset 0, flags [+], proto UDP (17), length 996, options (RR [bad length 4] [bad ptr 5] 192.148.4.1,,RA value 256)) + localhost.36500 > localhost.search-agent: UDP, length 2000 +IP (tos 0x0, ttl 64, id 1053, offset 968, flags [+], proto UDP (17), length 996, options (RR [bad length 4] [bad ptr 5] 192.148.4.1,,RA value 256)) + localhost > localhost: udp +IP (tos 0x0, ttl 64, id 1053, offset 1936, flags [none], proto UDP (17), length 100, options (RR [bad length 4] [bad ptr 5] 192.148.4.1,,RA value 256)) + localhost > localhost: udp + +After: + +IP (tos 0x0, ttl 96, id 42549, offset 0, flags [+], proto UDP (17), length 996, options (RR [bad length 4] [bad ptr 5] 192.148.4.1,,RA value 256)) + localhost.51607 > localhost.search-agent: UDP, bad length 2000 > 960 +IP (tos 0x0, ttl 96, id 42549, offset 968, flags [+], proto UDP (17), length 996, options (NOP,NOP,NOP,NOP,RA value 256)) + localhost > localhost: udp +IP (tos 0x0, ttl 96, id 42549, offset 1936, flags [none], proto UDP (17), length 100, options (NOP,NOP,NOP,NOP,RA value 256)) + localhost > localhost: udp + +RA (20 | 0x80) is now copied as expected, RR (7) is "NOPed out". + +Link: https://lore.kernel.org/netdev/20220107080559.122713-1-ooppublic@163.com/ +Fixes: 19c3401a917b ("net: ipv4: place control buffer handling away from fragmentation iterators") +Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") +Signed-off-by: caixf +Signed-off-by: Jakub Kicinski +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + net/ipv4/ip_output.c | 15 ++++++++++++--- + 1 file changed, 12 insertions(+), 3 deletions(-) + +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c +index 9bca57ef8b838..ff38b46bd4b0f 100644 +--- a/net/ipv4/ip_output.c ++++ b/net/ipv4/ip_output.c +@@ -826,15 +826,24 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, + /* Everything is OK. Generate! */ + ip_fraglist_init(skb, iph, hlen, &iter); + +- if (iter.frag) +- ip_options_fragment(iter.frag); +- + for (;;) { + /* Prepare header of the next frame, + * before previous one went down. */ + if (iter.frag) { ++ bool first_frag = (iter.offset == 0); ++ + IPCB(iter.frag)->flags = IPCB(skb)->flags; + ip_fraglist_prepare(skb, &iter); ++ if (first_frag && IPCB(skb)->opt.optlen) { ++ /* ipcb->opt is not populated for frags ++ * coming from __ip_make_skb(), ++ * ip_options_fragment() needs optlen ++ */ ++ IPCB(iter.frag)->opt.optlen = ++ IPCB(skb)->opt.optlen; ++ ip_options_fragment(iter.frag); ++ ip_send_check(iter.iph); ++ } + } + + skb->tstamp = tstamp; +-- +2.34.1 + diff --git a/queue-5.15/ipv4-raw-lock-the-socket-in-raw_bind.patch b/queue-5.15/ipv4-raw-lock-the-socket-in-raw_bind.patch new file mode 100644 index 00000000000..673e1f9e09c --- /dev/null +++ b/queue-5.15/ipv4-raw-lock-the-socket-in-raw_bind.patch @@ -0,0 +1,78 @@ +From 155d484c9d0f6fb43cdd600bef35821ed7b378e6 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 26 Jan 2022 16:51:16 -0800 +Subject: ipv4: raw: lock the socket in raw_bind() + +From: Eric Dumazet + +[ Upstream commit 153a0d187e767c68733b8e9f46218eb1f41ab902 ] + +For some reason, raw_bind() forgot to lock the socket. 
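Editorial aside, not part of the upstream commit: a self-contained user-space analogue of the serialization the fix adds. In the kernel, both raw_bind() and the connect() path end up under lock_sock()/release_sock(); here a pthread mutex plays that role and the variable saddr is a made-up stand-in for inet->inet_saddr, so treat this purely as an analogy, not as socket code.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t sk_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int saddr;                 /* stand-in for inet->inet_saddr */

    static void *bind_path(void *arg)
    {
            (void)arg;
            pthread_mutex_lock(&sk_lock);      /* lock_sock(sk) in the fix */
            saddr = 0x7f000001;                /* write done on the bind side */
            pthread_mutex_unlock(&sk_lock);    /* release_sock(sk) */
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, bind_path, NULL);
            pthread_mutex_lock(&sk_lock);      /* reader, like the connect() path */
            printf("saddr = %#x\n", saddr);    /* no longer a data race */
            pthread_mutex_unlock(&sk_lock);
            pthread_join(t, NULL);
            return 0;
    }

The KCSAN report that motivated the change follows.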
+ +BUG: KCSAN: data-race in __ip4_datagram_connect / raw_bind + +write to 0xffff8881170d4308 of 4 bytes by task 5466 on cpu 0: + raw_bind+0x1b0/0x250 net/ipv4/raw.c:739 + inet_bind+0x56/0xa0 net/ipv4/af_inet.c:443 + __sys_bind+0x14b/0x1b0 net/socket.c:1697 + __do_sys_bind net/socket.c:1708 [inline] + __se_sys_bind net/socket.c:1706 [inline] + __x64_sys_bind+0x3d/0x50 net/socket.c:1706 + do_syscall_x64 arch/x86/entry/common.c:50 [inline] + do_syscall_64+0x44/0xd0 arch/x86/entry/common.c:80 + entry_SYSCALL_64_after_hwframe+0x44/0xae + +read to 0xffff8881170d4308 of 4 bytes by task 5468 on cpu 1: + __ip4_datagram_connect+0xb7/0x7b0 net/ipv4/datagram.c:39 + ip4_datagram_connect+0x2a/0x40 net/ipv4/datagram.c:89 + inet_dgram_connect+0x107/0x190 net/ipv4/af_inet.c:576 + __sys_connect_file net/socket.c:1900 [inline] + __sys_connect+0x197/0x1b0 net/socket.c:1917 + __do_sys_connect net/socket.c:1927 [inline] + __se_sys_connect net/socket.c:1924 [inline] + __x64_sys_connect+0x3d/0x50 net/socket.c:1924 + do_syscall_x64 arch/x86/entry/common.c:50 [inline] + do_syscall_64+0x44/0xd0 arch/x86/entry/common.c:80 + entry_SYSCALL_64_after_hwframe+0x44/0xae + +value changed: 0x00000000 -> 0x0003007f + +Reported by Kernel Concurrency Sanitizer on: +CPU: 1 PID: 5468 Comm: syz-executor.5 Not tainted 5.17.0-rc1-syzkaller #0 +Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 + +Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") +Signed-off-by: Eric Dumazet +Reported-by: syzbot +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + net/ipv4/raw.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c +index bb446e60cf580..b8689052079cd 100644 +--- a/net/ipv4/raw.c ++++ b/net/ipv4/raw.c +@@ -721,6 +721,7 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) + int ret = -EINVAL; + int chk_addr_ret; + ++ lock_sock(sk); + if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in)) + goto out; + +@@ -740,7 +741,9 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) + inet->inet_saddr = 0; /* Use device */ + sk_dst_reset(sk); + ret = 0; +-out: return ret; ++out: ++ release_sock(sk); ++ return ret; + } + + /* +-- +2.34.1 + diff --git a/queue-5.15/ipv4-remove-sparse-error-in-ip_neigh_gw4.patch b/queue-5.15/ipv4-remove-sparse-error-in-ip_neigh_gw4.patch new file mode 100644 index 00000000000..7de78605fb4 --- /dev/null +++ b/queue-5.15/ipv4-remove-sparse-error-in-ip_neigh_gw4.patch @@ -0,0 +1,39 @@ +From 3340dc071863f97c9d982d42a801dd41663c165d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 26 Jan 2022 17:34:04 -0800 +Subject: ipv4: remove sparse error in ip_neigh_gw4() + +From: Eric Dumazet + +[ Upstream commit 3c42b2019863b327caa233072c50739d4144dd16 ] + +./include/net/route.h:373:48: warning: incorrect type in argument 2 (different base types) +./include/net/route.h:373:48: expected unsigned int [usertype] key +./include/net/route.h:373:48: got restricted __be32 [usertype] daddr + +Fixes: 5c9f7c1dfc2e ("ipv4: Add helpers for neigh lookup for nexthop") +Signed-off-by: Eric Dumazet +Reviewed-by: David Ahern +Link: https://lore.kernel.org/r/20220127013404.1279313-1-eric.dumazet@gmail.com +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + include/net/route.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/include/net/route.h b/include/net/route.h +index 2e6c0e153e3a5..2551f3f03b37e 100644 +--- a/include/net/route.h ++++ 
b/include/net/route.h +@@ -369,7 +369,7 @@ static inline struct neighbour *ip_neigh_gw4(struct net_device *dev, + { + struct neighbour *neigh; + +- neigh = __ipv4_neigh_lookup_noref(dev, daddr); ++ neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)daddr); + if (unlikely(!neigh)) + neigh = __neigh_create(&arp_tbl, &daddr, dev, false); + +-- +2.34.1 + diff --git a/queue-5.15/ipv4-tcp-send-zero-ipid-in-synack-messages.patch b/queue-5.15/ipv4-tcp-send-zero-ipid-in-synack-messages.patch new file mode 100644 index 00000000000..8a91a48fbec --- /dev/null +++ b/queue-5.15/ipv4-tcp-send-zero-ipid-in-synack-messages.patch @@ -0,0 +1,77 @@ +From 8bad456d7055eceb6d61420b461caee6f28d99dd Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 26 Jan 2022 17:10:21 -0800 +Subject: ipv4: tcp: send zero IPID in SYNACK messages + +From: Eric Dumazet + +[ Upstream commit 970a5a3ea86da637471d3cd04d513a0755aba4bf ] + +In commit 431280eebed9 ("ipv4: tcp: send zero IPID for RST and +ACK sent in SYN-RECV and TIME-WAIT state") we took care of some +ctl packets sent by TCP. + +It turns out we need to use a similar strategy for SYNACK packets. + +By default, they carry IP_DF and IPID==0, but there are ways +to ask them to use the hashed IP ident generator and thus +be used to build off-path attacks. +(Ref: Off-Path TCP Exploits of the Mixed IPID Assignment) + +One of this way is to force (before listener is started) +echo 1 >/proc/sys/net/ipv4/ip_no_pmtu_disc + +Another way is using forged ICMP ICMP_FRAG_NEEDED +with a very small MTU (like 68) to force a false return from +ip_dont_fragment() + +In this patch, ip_build_and_send_pkt() uses the following +heuristics. + +1) Most SYNACK packets are smaller than IPV4_MIN_MTU and therefore +can use IP_DF regardless of the listener or route pmtu setting. + +2) In case the SYNACK packet is bigger than IPV4_MIN_MTU, +we use prandom_u32() generator instead of the IPv4 hashed ident one. + +Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") +Signed-off-by: Eric Dumazet +Reported-by: Ray Che +Reviewed-by: David Ahern +Cc: Geoff Alexander +Cc: Willy Tarreau +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + net/ipv4/ip_output.c | 11 +++++++++-- + 1 file changed, 9 insertions(+), 2 deletions(-) + +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c +index ff38b46bd4b0f..a4d2eb691cbc1 100644 +--- a/net/ipv4/ip_output.c ++++ b/net/ipv4/ip_output.c +@@ -162,12 +162,19 @@ int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk, + iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr); + iph->saddr = saddr; + iph->protocol = sk->sk_protocol; +- if (ip_dont_fragment(sk, &rt->dst)) { ++ /* Do not bother generating IPID for small packets (eg SYNACK) */ ++ if (skb->len <= IPV4_MIN_MTU || ip_dont_fragment(sk, &rt->dst)) { + iph->frag_off = htons(IP_DF); + iph->id = 0; + } else { + iph->frag_off = 0; +- __ip_select_ident(net, iph, 1); ++ /* TCP packets here are SYNACK with fat IPv4/TCP options. ++ * Avoid using the hashed IP ident generator. 
++ */ ++ if (sk->sk_protocol == IPPROTO_TCP) ++ iph->id = (__force __be16)prandom_u32(); ++ else ++ __ip_select_ident(net, iph, 1); + } + + if (opt && opt->opt.optlen) { +-- +2.34.1 + diff --git a/queue-5.15/kvm-selftests-don-t-skip-l2-s-vmcall-in-smm-test-for.patch b/queue-5.15/kvm-selftests-don-t-skip-l2-s-vmcall-in-smm-test-for.patch new file mode 100644 index 00000000000..5bd12ea53a2 --- /dev/null +++ b/queue-5.15/kvm-selftests-don-t-skip-l2-s-vmcall-in-smm-test-for.patch @@ -0,0 +1,45 @@ +From 6424b98d3b47da44c9768aa26b99d699cb968517 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 25 Jan 2022 22:17:25 +0000 +Subject: KVM: selftests: Don't skip L2's VMCALL in SMM test for SVM guest + +From: Sean Christopherson + +[ Upstream commit 4cf3d3ebe8794c449af3e0e8c1d790c97e461d20 ] + +Don't skip the vmcall() in l2_guest_code() prior to re-entering L2, doing +so will result in L2 running to completion, popping '0' off the stack for +RET, jumping to address '0', and ultimately dying with a triple fault +shutdown. + +It's not at all obvious why the test re-enters L2 and re-executes VMCALL, +but presumably it serves a purpose. The VMX path doesn't skip vmcall(), +and the test can't possibly have passed on SVM, so just do what VMX does. + +Fixes: d951b2210c1a ("KVM: selftests: smm_test: Test SMM enter from L2") +Cc: Maxim Levitsky +Signed-off-by: Sean Christopherson +Message-Id: <20220125221725.2101126-1-seanjc@google.com> +Reviewed-by: Vitaly Kuznetsov +Tested-by: Vitaly Kuznetsov +Signed-off-by: Paolo Bonzini +Signed-off-by: Sasha Levin +--- + tools/testing/selftests/kvm/x86_64/smm_test.c | 1 - + 1 file changed, 1 deletion(-) + +diff --git a/tools/testing/selftests/kvm/x86_64/smm_test.c b/tools/testing/selftests/kvm/x86_64/smm_test.c +index d0fe2fdce58c4..db2a17559c3d5 100644 +--- a/tools/testing/selftests/kvm/x86_64/smm_test.c ++++ b/tools/testing/selftests/kvm/x86_64/smm_test.c +@@ -105,7 +105,6 @@ static void guest_code(void *arg) + + if (cpu_has_svm()) { + run_guest(svm->vmcb, svm->vmcb_gpa); +- svm->vmcb->save.rip += 3; + run_guest(svm->vmcb, svm->vmcb_gpa); + } else { + vmlaunch(); +-- +2.34.1 + diff --git a/queue-5.15/net-bridge-vlan-fix-memory-leak-in-__allowed_ingress.patch b/queue-5.15/net-bridge-vlan-fix-memory-leak-in-__allowed_ingress.patch new file mode 100644 index 00000000000..8222a01becb --- /dev/null +++ b/queue-5.15/net-bridge-vlan-fix-memory-leak-in-__allowed_ingress.patch @@ -0,0 +1,47 @@ +From dd572c581d4b6bcc339151202744ae5ce634a55e Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 27 Jan 2022 15:49:53 +0800 +Subject: net: bridge: vlan: fix memory leak in __allowed_ingress + +From: Tim Yi + +[ Upstream commit fd20d9738395cf8e27d0a17eba34169699fccdff ] + +When using per-vlan state, if vlan snooping and stats are disabled, +untagged or priority-tagged ingress frame will go to check pvid state. +If the port state is forwarding and the pvid state is not +learning/forwarding, untagged or priority-tagged frame will be dropped +but skb memory is not freed. +Should free skb when __allowed_ingress returns false. 
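Editorial aside, not part of the upstream commit: a compact user-space model of the pvid check described above, only to make the drop condition explicit. The BR_STATE_* values follow include/uapi/linux/if_bridge.h, and vlan_state_allowed() is a paraphrase of br_vlan_state_allowed(), so treat it as an assumption rather than the exact bridge code.

    #include <stdbool.h>
    #include <stdio.h>

    enum { BR_STATE_DISABLED, BR_STATE_LISTENING, BR_STATE_LEARNING,
           BR_STATE_FORWARDING, BR_STATE_BLOCKING };

    /* Paraphrase: forwarding always passes, learning passes only
     * when the caller allows it.
     */
    static bool vlan_state_allowed(int state, bool learn_allow)
    {
            return state == BR_STATE_FORWARDING ||
                   (learn_allow && state == BR_STATE_LEARNING);
    }

    int main(void)
    {
            int port_state = BR_STATE_FORWARDING;
            int pvid_state = BR_STATE_BLOCKING;   /* pvid not forwarding/learning */

            if (port_state == BR_STATE_FORWARDING &&
                !vlan_state_allowed(pvid_state, true))
                    printf("untagged frame dropped: the skb must be freed here\n");
            return 0;
    }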
+ +Fixes: a580c76d534c ("net: bridge: vlan: add per-vlan state") +Signed-off-by: Tim Yi +Acked-by: Nikolay Aleksandrov +Link: https://lore.kernel.org/r/20220127074953.12632-1-tim.yi@pica8.com +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + net/bridge/br_vlan.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c +index 06f5caee495aa..10e63ea6a13e1 100644 +--- a/net/bridge/br_vlan.c ++++ b/net/bridge/br_vlan.c +@@ -560,10 +560,10 @@ static bool __allowed_ingress(const struct net_bridge *br, + !br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) { + if (*state == BR_STATE_FORWARDING) { + *state = br_vlan_get_pvid_state(vg); +- return br_vlan_state_allowed(*state, true); +- } else { +- return true; ++ if (!br_vlan_state_allowed(*state, true)) ++ goto drop; + } ++ return true; + } + } + v = br_vlan_find(vg, *vid); +-- +2.34.1 + diff --git a/queue-5.15/net-bridge-vlan-fix-single-net-device-option-dumping.patch b/queue-5.15/net-bridge-vlan-fix-single-net-device-option-dumping.patch new file mode 100644 index 00000000000..c5f51559122 --- /dev/null +++ b/queue-5.15/net-bridge-vlan-fix-single-net-device-option-dumping.patch @@ -0,0 +1,42 @@ +From fdeb544261cdb6d88cb50f13d40a2afde0cf04dd Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 26 Jan 2022 15:10:25 +0200 +Subject: net: bridge: vlan: fix single net device option dumping + +From: Nikolay Aleksandrov + +[ Upstream commit dcb2c5c6ca9b9177f04abaf76e5a983d177c9414 ] + +When dumping vlan options for a single net device we send the same +entries infinitely because user-space expects a 0 return at the end but +we keep returning skb->len and restarting the dump on retry. Fix it by +returning the value from br_vlan_dump_dev() if it completed or there was +an error. The only case that must return skb->len is when the dump was +incomplete and needs to continue (-EMSGSIZE). + +Reported-by: Benjamin Poirier +Fixes: 8dcea187088b ("net: bridge: vlan: add rtm definitions and dump support") +Signed-off-by: Nikolay Aleksandrov +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + net/bridge/br_vlan.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c +index 19f65ab91a027..06f5caee495aa 100644 +--- a/net/bridge/br_vlan.c ++++ b/net/bridge/br_vlan.c +@@ -2105,7 +2105,8 @@ static int br_vlan_rtm_dump(struct sk_buff *skb, struct netlink_callback *cb) + goto out_err; + } + err = br_vlan_dump_dev(dev, skb, cb, dump_flags); +- if (err && err != -EMSGSIZE) ++ /* if the dump completed without an error we return 0 here */ ++ if (err != -EMSGSIZE) + goto out_err; + } else { + for_each_netdev_rcu(net, dev) { +-- +2.34.1 + diff --git a/queue-5.15/net-cpsw-properly-initialise-struct-page_pool_params.patch b/queue-5.15/net-cpsw-properly-initialise-struct-page_pool_params.patch new file mode 100644 index 00000000000..aaadfa3955d --- /dev/null +++ b/queue-5.15/net-cpsw-properly-initialise-struct-page_pool_params.patch @@ -0,0 +1,48 @@ +From 1129eed489e19520c48b2f792e703f5d5b99f50d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 24 Jan 2022 15:35:29 +0100 +Subject: net: cpsw: Properly initialise struct page_pool_params +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Toke Høiland-Jørgensen + +[ Upstream commit c63003e3d99761afb280add3b30de1cf30fa522b ] + +The cpsw driver didn't properly initialise the struct page_pool_params +before calling page_pool_create(), which leads to crashes after the struct +has been expanded with new parameters. + +The second Fixes tag below is where the buggy code was introduced, but +because the code was moved around this patch will only apply on top of the +commit in the first Fixes tag. + +Fixes: c5013ac1dd0e ("net: ethernet: ti: cpsw: move set of common functions in cpsw_priv") +Fixes: 9ed4050c0d75 ("net: ethernet: ti: cpsw: add XDP support") +Reported-by: Colin Foster +Signed-off-by: Toke Høiland-Jørgensen +Tested-by: Colin Foster +Acked-by: Jesper Dangaard Brouer +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/ti/cpsw_priv.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c +index 6bb5ac51d23c3..f8e591d69d2cb 100644 +--- a/drivers/net/ethernet/ti/cpsw_priv.c ++++ b/drivers/net/ethernet/ti/cpsw_priv.c +@@ -1144,7 +1144,7 @@ int cpsw_fill_rx_channels(struct cpsw_priv *priv) + static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw, + int size) + { +- struct page_pool_params pp_params; ++ struct page_pool_params pp_params = {}; + struct page_pool *pool; + + pp_params.order = 0; +-- +2.34.1 + diff --git a/queue-5.15/net-hns3-handle-empty-unknown-interrupt-for-vf.patch b/queue-5.15/net-hns3-handle-empty-unknown-interrupt-for-vf.patch new file mode 100644 index 00000000000..c17b873bdc9 --- /dev/null +++ b/queue-5.15/net-hns3-handle-empty-unknown-interrupt-for-vf.patch @@ -0,0 +1,41 @@ +From 32448961a0ce53f1faf86c860982bf19c9a43f6a Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 25 Jan 2022 15:03:12 +0800 +Subject: net: hns3: handle empty unknown interrupt for VF + +From: Yufeng Mo + +[ Upstream commit 2f61353cd2f789a4229b6f5c1c24a40a613357bb ] + +Since some interrupt states may be cleared by hardware, the driver +may receive an empty interrupt. Currently, the VF driver directly +disables the vector0 interrupt in this case. As a result, the VF +is unavailable. Therefore, the vector0 interrupt should be enabled +in this case. 
+ +Fixes: b90fcc5bd904 ("net: hns3: add reset handling for VF when doing Core/Global/IMP reset") +Signed-off-by: Yufeng Mo +Signed-off-by: Guangbin Huang +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +index fee7d9e79f8c3..417a08d600b83 100644 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +@@ -2496,8 +2496,7 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) + break; + } + +- if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) +- hclgevf_enable_vector(&hdev->misc_vector, true); ++ hclgevf_enable_vector(&hdev->misc_vector, true); + + return IRQ_HANDLED; + } +-- +2.34.1 + diff --git a/queue-5.15/net-procfs-show-net-devices-bound-packet-types.patch b/queue-5.15/net-procfs-show-net-devices-bound-packet-types.patch new file mode 100644 index 00000000000..4feee8c2a17 --- /dev/null +++ b/queue-5.15/net-procfs-show-net-devices-bound-packet-types.patch @@ -0,0 +1,117 @@ +From 37ef3e0dac27817563c1f645ce93ceba8d54ff3c Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 21 Jan 2022 17:15:31 +0800 +Subject: net-procfs: show net devices bound packet types + +From: Jianguo Wu + +[ Upstream commit 1d10f8a1f40b965d449e8f2d5ed7b96a7c138b77 ] + +After commit:7866a621043f ("dev: add per net_device packet type chains"), +we can not get packet types that are bound to a specified net device by +/proc/net/ptype, this patch fix the regression. + +Run "tcpdump -i ens192 udp -nns0" Before and after apply this patch: + +Before: + [root@localhost ~]# cat /proc/net/ptype + Type Device Function + 0800 ip_rcv + 0806 arp_rcv + 86dd ipv6_rcv + +After: + [root@localhost ~]# cat /proc/net/ptype + Type Device Function + ALL ens192 tpacket_rcv + 0800 ip_rcv + 0806 arp_rcv + 86dd ipv6_rcv + +v1 -> v2: + - fix the regression rather than adding new /proc API as + suggested by Stephen Hemminger. + +Fixes: 7866a621043f ("dev: add per net_device packet type chains") +Signed-off-by: Jianguo Wu +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + net/core/net-procfs.c | 35 ++++++++++++++++++++++++++++++++--- + 1 file changed, 32 insertions(+), 3 deletions(-) + +diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c +index 5b8016335acaf..88cc0ad7d386e 100644 +--- a/net/core/net-procfs.c ++++ b/net/core/net-procfs.c +@@ -190,12 +190,23 @@ static const struct seq_operations softnet_seq_ops = { + .show = softnet_seq_show, + }; + +-static void *ptype_get_idx(loff_t pos) ++static void *ptype_get_idx(struct seq_file *seq, loff_t pos) + { ++ struct list_head *ptype_list = NULL; + struct packet_type *pt = NULL; ++ struct net_device *dev; + loff_t i = 0; + int t; + ++ for_each_netdev_rcu(seq_file_net(seq), dev) { ++ ptype_list = &dev->ptype_all; ++ list_for_each_entry_rcu(pt, ptype_list, list) { ++ if (i == pos) ++ return pt; ++ ++i; ++ } ++ } ++ + list_for_each_entry_rcu(pt, &ptype_all, list) { + if (i == pos) + return pt; +@@ -216,22 +227,40 @@ static void *ptype_seq_start(struct seq_file *seq, loff_t *pos) + __acquires(RCU) + { + rcu_read_lock(); +- return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN; ++ return *pos ? 
ptype_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; + } + + static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos) + { ++ struct net_device *dev; + struct packet_type *pt; + struct list_head *nxt; + int hash; + + ++*pos; + if (v == SEQ_START_TOKEN) +- return ptype_get_idx(0); ++ return ptype_get_idx(seq, 0); + + pt = v; + nxt = pt->list.next; ++ if (pt->dev) { ++ if (nxt != &pt->dev->ptype_all) ++ goto found; ++ ++ dev = pt->dev; ++ for_each_netdev_continue_rcu(seq_file_net(seq), dev) { ++ if (!list_empty(&dev->ptype_all)) { ++ nxt = dev->ptype_all.next; ++ goto found; ++ } ++ } ++ ++ nxt = ptype_all.next; ++ goto ptype_all; ++ } ++ + if (pt->type == htons(ETH_P_ALL)) { ++ptype_all: + if (nxt != &ptype_all) + goto found; + hash = 0; +-- +2.34.1 + diff --git a/queue-5.15/net-smc-transitional-solution-for-clcsock-race-issue.patch b/queue-5.15/net-smc-transitional-solution-for-clcsock-race-issue.patch new file mode 100644 index 00000000000..76444d556b7 --- /dev/null +++ b/queue-5.15/net-smc-transitional-solution-for-clcsock-race-issue.patch @@ -0,0 +1,201 @@ +From 9229e8551ae3f3154890de311d3909c90b070c72 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sat, 22 Jan 2022 17:43:09 +0800 +Subject: net/smc: Transitional solution for clcsock race issue + +From: Wen Gu + +[ Upstream commit c0bf3d8a943b6f2e912b7c1de03e2ef28e76f760 ] + +We encountered a crash in smc_setsockopt() and it is caused by +accessing smc->clcsock after clcsock was released. + + BUG: kernel NULL pointer dereference, address: 0000000000000020 + #PF: supervisor read access in kernel mode + #PF: error_code(0x0000) - not-present page + PGD 0 P4D 0 + Oops: 0000 [#1] PREEMPT SMP PTI + CPU: 1 PID: 50309 Comm: nginx Kdump: loaded Tainted: G E 5.16.0-rc4+ #53 + RIP: 0010:smc_setsockopt+0x59/0x280 [smc] + Call Trace: + + __sys_setsockopt+0xfc/0x190 + __x64_sys_setsockopt+0x20/0x30 + do_syscall_64+0x34/0x90 + entry_SYSCALL_64_after_hwframe+0x44/0xae + RIP: 0033:0x7f16ba83918e + + +This patch tries to fix it by holding clcsock_release_lock and +checking whether clcsock has already been released before access. + +In case that a crash of the same reason happens in smc_getsockopt() +or smc_switch_to_fallback(), this patch also checkes smc->clcsock +in them too. And the caller of smc_switch_to_fallback() will identify +whether fallback succeeds according to the return value. + +Fixes: fd57770dd198 ("net/smc: wait for pending work before clcsock release_sock") +Link: https://lore.kernel.org/lkml/5dd7ffd1-28e2-24cc-9442-1defec27375e@linux.ibm.com/T/ +Signed-off-by: Wen Gu +Acked-by: Karsten Graul +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + net/smc/af_smc.c | 63 +++++++++++++++++++++++++++++++++++++++--------- + 1 file changed, 51 insertions(+), 12 deletions(-) + +diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c +index 07ff719f39077..34608369b426f 100644 +--- a/net/smc/af_smc.c ++++ b/net/smc/af_smc.c +@@ -548,12 +548,17 @@ static void smc_stat_fallback(struct smc_sock *smc) + mutex_unlock(&net->smc.mutex_fback_rsn); + } + +-static void smc_switch_to_fallback(struct smc_sock *smc, int reason_code) ++static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code) + { + wait_queue_head_t *smc_wait = sk_sleep(&smc->sk); +- wait_queue_head_t *clc_wait = sk_sleep(smc->clcsock->sk); ++ wait_queue_head_t *clc_wait; + unsigned long flags; + ++ mutex_lock(&smc->clcsock_release_lock); ++ if (!smc->clcsock) { ++ mutex_unlock(&smc->clcsock_release_lock); ++ return -EBADF; ++ } + smc->use_fallback = true; + smc->fallback_rsn = reason_code; + smc_stat_fallback(smc); +@@ -567,18 +572,30 @@ static void smc_switch_to_fallback(struct smc_sock *smc, int reason_code) + * smc socket->wq, which should be removed + * to clcsocket->wq during the fallback. + */ ++ clc_wait = sk_sleep(smc->clcsock->sk); + spin_lock_irqsave(&smc_wait->lock, flags); + spin_lock_nested(&clc_wait->lock, SINGLE_DEPTH_NESTING); + list_splice_init(&smc_wait->head, &clc_wait->head); + spin_unlock(&clc_wait->lock); + spin_unlock_irqrestore(&smc_wait->lock, flags); + } ++ mutex_unlock(&smc->clcsock_release_lock); ++ return 0; + } + + /* fall back during connect */ + static int smc_connect_fallback(struct smc_sock *smc, int reason_code) + { +- smc_switch_to_fallback(smc, reason_code); ++ struct net *net = sock_net(&smc->sk); ++ int rc = 0; ++ ++ rc = smc_switch_to_fallback(smc, reason_code); ++ if (rc) { /* fallback fails */ ++ this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt); ++ if (smc->sk.sk_state == SMC_INIT) ++ sock_put(&smc->sk); /* passive closing */ ++ return rc; ++ } + smc_copy_sock_settings_to_clc(smc); + smc->connect_nonblock = 0; + if (smc->sk.sk_state == SMC_INIT) +@@ -1384,11 +1401,12 @@ static void smc_listen_decline(struct smc_sock *new_smc, int reason_code, + { + /* RDMA setup failed, switch back to TCP */ + smc_conn_abort(new_smc, local_first); +- if (reason_code < 0) { /* error, no fallback possible */ ++ if (reason_code < 0 || ++ smc_switch_to_fallback(new_smc, reason_code)) { ++ /* error, no fallback possible */ + smc_listen_out_err(new_smc); + return; + } +- smc_switch_to_fallback(new_smc, reason_code); + if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) { + if (smc_clc_send_decline(new_smc, reason_code, version) < 0) { + smc_listen_out_err(new_smc); +@@ -1761,8 +1779,11 @@ static void smc_listen_work(struct work_struct *work) + + /* check if peer is smc capable */ + if (!tcp_sk(newclcsock->sk)->syn_smc) { +- smc_switch_to_fallback(new_smc, SMC_CLC_DECL_PEERNOSMC); +- smc_listen_out_connected(new_smc); ++ rc = smc_switch_to_fallback(new_smc, SMC_CLC_DECL_PEERNOSMC); ++ if (rc) ++ smc_listen_out_err(new_smc); ++ else ++ smc_listen_out_connected(new_smc); + return; + } + +@@ -2048,7 +2069,9 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) + + if (msg->msg_flags & MSG_FASTOPEN) { + if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) { +- smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP); ++ rc = smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP); ++ if (rc) ++ goto out; + } else { + rc = -EINVAL; + goto out; +@@ -2241,6 +2264,11 @@ static int 
smc_setsockopt(struct socket *sock, int level, int optname, + /* generic setsockopts reaching us here always apply to the + * CLC socket + */ ++ mutex_lock(&smc->clcsock_release_lock); ++ if (!smc->clcsock) { ++ mutex_unlock(&smc->clcsock_release_lock); ++ return -EBADF; ++ } + if (unlikely(!smc->clcsock->ops->setsockopt)) + rc = -EOPNOTSUPP; + else +@@ -2250,6 +2278,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname, + sk->sk_err = smc->clcsock->sk->sk_err; + sk_error_report(sk); + } ++ mutex_unlock(&smc->clcsock_release_lock); + + if (optlen < sizeof(int)) + return -EINVAL; +@@ -2266,7 +2295,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname, + case TCP_FASTOPEN_NO_COOKIE: + /* option not supported by SMC */ + if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) { +- smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP); ++ rc = smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP); + } else { + rc = -EINVAL; + } +@@ -2309,13 +2338,23 @@ static int smc_getsockopt(struct socket *sock, int level, int optname, + char __user *optval, int __user *optlen) + { + struct smc_sock *smc; ++ int rc; + + smc = smc_sk(sock->sk); ++ mutex_lock(&smc->clcsock_release_lock); ++ if (!smc->clcsock) { ++ mutex_unlock(&smc->clcsock_release_lock); ++ return -EBADF; ++ } + /* socket options apply to the CLC socket */ +- if (unlikely(!smc->clcsock->ops->getsockopt)) ++ if (unlikely(!smc->clcsock->ops->getsockopt)) { ++ mutex_unlock(&smc->clcsock_release_lock); + return -EOPNOTSUPP; +- return smc->clcsock->ops->getsockopt(smc->clcsock, level, optname, +- optval, optlen); ++ } ++ rc = smc->clcsock->ops->getsockopt(smc->clcsock, level, optname, ++ optval, optlen); ++ mutex_unlock(&smc->clcsock_release_lock); ++ return rc; + } + + static int smc_ioctl(struct socket *sock, unsigned int cmd, +-- +2.34.1 + diff --git a/queue-5.15/ping-fix-the-sk_bound_dev_if-match-in-ping_lookup.patch b/queue-5.15/ping-fix-the-sk_bound_dev_if-match-in-ping_lookup.patch new file mode 100644 index 00000000000..64fa7d10da2 --- /dev/null +++ b/queue-5.15/ping-fix-the-sk_bound_dev_if-match-in-ping_lookup.patch @@ -0,0 +1,51 @@ +From 906ff598d27630b0c4d1141166ba47a23928ca67 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sat, 22 Jan 2022 06:40:56 -0500 +Subject: ping: fix the sk_bound_dev_if match in ping_lookup + +From: Xin Long + +[ Upstream commit 2afc3b5a31f9edf3ef0f374f5d70610c79c93a42 ] + +When 'ping' changes to use PING socket instead of RAW socket by: + + # sysctl -w net.ipv4.ping_group_range="0 100" + +the selftests 'router_broadcast.sh' will fail, as such command + + # ip vrf exec vrf-h1 ping -I veth0 198.51.100.255 -b + +can't receive the response skb by the PING socket. It's caused by mismatch +of sk_bound_dev_if and dif in ping_rcv() when looking up the PING socket, +as dif is vrf-h1 if dif's master was set to vrf-h1. + +This patch is to fix this regression by also checking the sk_bound_dev_if +against sdif so that the packets can stil be received even if the socket +is not bound to the vrf device but to the real iif. + +Fixes: c319b4d76b9e ("net: ipv4: add IPPROTO_ICMP socket kind") +Reported-by: Hangbin Liu +Signed-off-by: Xin Long +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + net/ipv4/ping.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c +index 1e44a43acfe2d..086822cb1cc96 100644 +--- a/net/ipv4/ping.c ++++ b/net/ipv4/ping.c +@@ -220,7 +220,8 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident) + continue; + } + +- if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) ++ if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif && ++ sk->sk_bound_dev_if != inet_sdif(skb)) + continue; + + sock_hold(sk); +-- +2.34.1 + diff --git a/queue-5.15/powerpc-perf-fix-power_pmu_disable-to-call-clear_pmi.patch b/queue-5.15/powerpc-perf-fix-power_pmu_disable-to-call-clear_pmi.patch new file mode 100644 index 00000000000..514110433c0 --- /dev/null +++ b/queue-5.15/powerpc-perf-fix-power_pmu_disable-to-call-clear_pmi.patch @@ -0,0 +1,95 @@ +From dbe6ce0e0e207d86b40712eba8219c4c53feba8d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sat, 22 Jan 2022 09:04:29 +0530 +Subject: powerpc/perf: Fix power_pmu_disable to call clear_pmi_irq_pending + only if PMI is pending + +From: Athira Rajeev + +[ Upstream commit fb6433b48a178d4672cb26632454ee0b21056eaa ] + +Running selftest with CONFIG_PPC_IRQ_SOFT_MASK_DEBUG enabled in kernel +triggered below warning: + +[ 172.851380] ------------[ cut here ]------------ +[ 172.851391] WARNING: CPU: 8 PID: 2901 at arch/powerpc/include/asm/hw_irq.h:246 power_pmu_disable+0x270/0x280 +[ 172.851402] Modules linked in: dm_mod bonding nft_ct nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 ip_set nf_tables rfkill nfnetlink sunrpc xfs libcrc32c pseries_rng xts vmx_crypto uio_pdrv_genirq uio sch_fq_codel ip_tables ext4 mbcache jbd2 sd_mod t10_pi sg ibmvscsi ibmveth scsi_transport_srp fuse +[ 172.851442] CPU: 8 PID: 2901 Comm: lost_exception_ Not tainted 5.16.0-rc5-03218-g798527287598 #2 +[ 172.851451] NIP: c00000000013d600 LR: c00000000013d5a4 CTR: c00000000013b180 +[ 172.851458] REGS: c000000017687860 TRAP: 0700 Not tainted (5.16.0-rc5-03218-g798527287598) +[ 172.851465] MSR: 8000000000029033 CR: 48004884 XER: 20040000 +[ 172.851482] CFAR: c00000000013d5b4 IRQMASK: 1 +[ 172.851482] GPR00: c00000000013d5a4 c000000017687b00 c000000002a10600 0000000000000004 +[ 172.851482] GPR04: 0000000082004000 c0000008ba08f0a8 0000000000000000 00000008b7ed0000 +[ 172.851482] GPR08: 00000000446194f6 0000000000008000 c00000000013b118 c000000000d58e68 +[ 172.851482] GPR12: c00000000013d390 c00000001ec54a80 0000000000000000 0000000000000000 +[ 172.851482] GPR16: 0000000000000000 0000000000000000 c000000015d5c708 c0000000025396d0 +[ 172.851482] GPR20: 0000000000000000 0000000000000000 c00000000a3bbf40 0000000000000003 +[ 172.851482] GPR24: 0000000000000000 c0000008ba097400 c0000000161e0d00 c00000000a3bb600 +[ 172.851482] GPR28: c000000015d5c700 0000000000000001 0000000082384090 c0000008ba0020d8 +[ 172.851549] NIP [c00000000013d600] power_pmu_disable+0x270/0x280 +[ 172.851557] LR [c00000000013d5a4] power_pmu_disable+0x214/0x280 +[ 172.851565] Call Trace: +[ 172.851568] [c000000017687b00] [c00000000013d5a4] power_pmu_disable+0x214/0x280 (unreliable) +[ 172.851579] [c000000017687b40] [c0000000003403ac] perf_pmu_disable+0x4c/0x60 +[ 172.851588] [c000000017687b60] [c0000000003445e4] __perf_event_task_sched_out+0x1d4/0x660 +[ 172.851596] [c000000017687c50] [c000000000d1175c] __schedule+0xbcc/0x12a0 +[ 172.851602] [c000000017687d60] [c000000000d11ea8] schedule+0x78/0x140 +[ 172.851608] [c000000017687d90] [c0000000001a8080] sys_sched_yield+0x20/0x40 +[ 
172.851615] [c000000017687db0] [c0000000000334dc] system_call_exception+0x18c/0x380 +[ 172.851622] [c000000017687e10] [c00000000000c74c] system_call_common+0xec/0x268 + +The warning indicates that MSR_EE being set(interrupt enabled) when +there was an overflown PMC detected. This could happen in +power_pmu_disable since it runs under interrupt soft disable +condition ( local_irq_save ) and not with interrupts hard disabled. +commit 2c9ac51b850d ("powerpc/perf: Fix PMU callbacks to clear +pending PMI before resetting an overflown PMC") intended to clear +PMI pending bit in Paca when disabling the PMU. It could happen +that PMC gets overflown while code is in power_pmu_disable +callback function. Hence add a check to see if PMI pending bit +is set in Paca before clearing it via clear_pmi_pending. + +Fixes: 2c9ac51b850d ("powerpc/perf: Fix PMU callbacks to clear pending PMI before resetting an overflown PMC") +Reported-by: Sachin Sant +Signed-off-by: Athira Rajeev +Tested-by: Sachin Sant +Reviewed-by: Nicholas Piggin +Signed-off-by: Michael Ellerman +Link: https://lore.kernel.org/r/20220122033429.25395-1-atrajeev@linux.vnet.ibm.com +Signed-off-by: Sasha Levin +--- + arch/powerpc/perf/core-book3s.c | 17 ++++++++++++++--- + 1 file changed, 14 insertions(+), 3 deletions(-) + +diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c +index bef6b1abce702..e78de70509472 100644 +--- a/arch/powerpc/perf/core-book3s.c ++++ b/arch/powerpc/perf/core-book3s.c +@@ -1326,9 +1326,20 @@ static void power_pmu_disable(struct pmu *pmu) + * Otherwise provide a warning if there is PMI pending, but + * no counter is found overflown. + */ +- if (any_pmc_overflown(cpuhw)) +- clear_pmi_irq_pending(); +- else ++ if (any_pmc_overflown(cpuhw)) { ++ /* ++ * Since power_pmu_disable runs under local_irq_save, it ++ * could happen that code hits a PMC overflow without PMI ++ * pending in paca. Hence only clear PMI pending if it was ++ * set. ++ * ++ * If a PMI is pending, then MSR[EE] must be disabled (because ++ * the masked PMI handler disabling EE). So it is safe to ++ * call clear_pmi_irq_pending(). ++ */ ++ if (pmi_irq_pending()) ++ clear_pmi_irq_pending(); ++ } else + WARN_ON(pmi_irq_pending()); + + val = mmcra = cpuhw->mmcr.mmcra; +-- +2.34.1 + diff --git a/queue-5.15/revert-drm-ast-support-1600x900-with-108mhz-pclk.patch b/queue-5.15/revert-drm-ast-support-1600x900-with-108mhz-pclk.patch new file mode 100644 index 00000000000..07c511c64d8 --- /dev/null +++ b/queue-5.15/revert-drm-ast-support-1600x900-with-108mhz-pclk.patch @@ -0,0 +1,38 @@ +From a721e5708389aaafa7960458a93fe768aa012141 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 20 Jan 2022 14:05:27 +1000 +Subject: Revert "drm/ast: Support 1600x900 with 108MHz PCLK" + +From: Dave Airlie + +[ Upstream commit 76cea3d95513fe40000d06a3719c4bb6b53275e2 ] + +This reverts commit 9bb7b689274b67ecb3641e399e76f84adc627df1. + +This caused a regression reported to Red Hat. 
+ +Fixes: 9bb7b689274b ("drm/ast: Support 1600x900 with 108MHz PCLK") +Signed-off-by: Dave Airlie +Signed-off-by: Thomas Zimmermann +Link: https://patchwork.freedesktop.org/patch/msgid/20220120040527.552068-1-airlied@gmail.com +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/ast/ast_tables.h | 2 -- + 1 file changed, 2 deletions(-) + +diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h +index d9eb353a4bf09..dbe1cc620f6e6 100644 +--- a/drivers/gpu/drm/ast/ast_tables.h ++++ b/drivers/gpu/drm/ast/ast_tables.h +@@ -282,8 +282,6 @@ static const struct ast_vbios_enhtable res_1360x768[] = { + }; + + static const struct ast_vbios_enhtable res_1600x900[] = { +- {1800, 1600, 24, 80, 1000, 900, 1, 3, VCLK108, /* 60Hz */ +- (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 3, 0x3A }, + {1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* 60Hz CVT RB */ + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), 60, 1, 0x3A }, +-- +2.34.1 + diff --git a/queue-5.15/revert-ipv6-honor-all-ipv6-pio-valid-lifetime-values.patch b/queue-5.15/revert-ipv6-honor-all-ipv6-pio-valid-lifetime-values.patch new file mode 100644 index 00000000000..31d0ac09008 --- /dev/null +++ b/queue-5.15/revert-ipv6-honor-all-ipv6-pio-valid-lifetime-values.patch @@ -0,0 +1,96 @@ +From adcf1877122cce3e7f7679bddd7e437c161fb415 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 26 Jan 2022 16:38:52 +0100 +Subject: Revert "ipv6: Honor all IPv6 PIO Valid Lifetime values" + +From: Guillaume Nault + +[ Upstream commit 36268983e90316b37000a005642af42234dabb36 ] + +This reverts commit b75326c201242de9495ff98e5d5cff41d7fc0d9d. + +This commit breaks Linux compatibility with USGv6 tests. The RFC this +commit was based on is actually an expired draft: no published RFC +currently allows the new behaviour it introduced. + +Without full IETF endorsement, the flash renumbering scenario this +patch was supposed to enable is never going to work, as other IPv6 +equipements on the same LAN will keep the 2 hours limit. + +Fixes: b75326c20124 ("ipv6: Honor all IPv6 PIO Valid Lifetime values") +Signed-off-by: Guillaume Nault +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + include/net/addrconf.h | 2 ++ + net/ipv6/addrconf.c | 27 ++++++++++++++++++++------- + 2 files changed, 22 insertions(+), 7 deletions(-) + +diff --git a/include/net/addrconf.h b/include/net/addrconf.h +index 78ea3e332688f..e7ce719838b5e 100644 +--- a/include/net/addrconf.h ++++ b/include/net/addrconf.h +@@ -6,6 +6,8 @@ + #define RTR_SOLICITATION_INTERVAL (4*HZ) + #define RTR_SOLICITATION_MAX_INTERVAL (3600*HZ) /* 1 hour */ + ++#define MIN_VALID_LIFETIME (2*3600) /* 2 hours */ ++ + #define TEMP_VALID_LIFETIME (7*86400) + #define TEMP_PREFERRED_LIFETIME (86400) + #define REGEN_MAX_RETRY (3) +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c +index 846037e73723f..bf13865426340 100644 +--- a/net/ipv6/addrconf.c ++++ b/net/ipv6/addrconf.c +@@ -2587,7 +2587,7 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev, + __u32 valid_lft, u32 prefered_lft) + { + struct inet6_ifaddr *ifp = ipv6_get_ifaddr(net, addr, dev, 1); +- int create = 0; ++ int create = 0, update_lft = 0; + + if (!ifp && valid_lft) { + int max_addresses = in6_dev->cnf.max_addresses; +@@ -2631,19 +2631,32 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev, + unsigned long now; + u32 stored_lft; + +- /* Update lifetime (RFC4862 5.5.3 e) +- * We deviate from RFC4862 by honoring all Valid Lifetimes to +- * improve the reaction of SLAAC to renumbering events +- * (draft-gont-6man-slaac-renum-06, Section 4.2) +- */ ++ /* update lifetime (RFC2462 5.5.3 e) */ + spin_lock_bh(&ifp->lock); + now = jiffies; + if (ifp->valid_lft > (now - ifp->tstamp) / HZ) + stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ; + else + stored_lft = 0; +- + if (!create && stored_lft) { ++ const u32 minimum_lft = min_t(u32, ++ stored_lft, MIN_VALID_LIFETIME); ++ valid_lft = max(valid_lft, minimum_lft); ++ ++ /* RFC4862 Section 5.5.3e: ++ * "Note that the preferred lifetime of the ++ * corresponding address is always reset to ++ * the Preferred Lifetime in the received ++ * Prefix Information option, regardless of ++ * whether the valid lifetime is also reset or ++ * ignored." ++ * ++ * So we should always update prefered_lft here. ++ */ ++ update_lft = 1; ++ } ++ ++ if (update_lft) { + ifp->valid_lft = valid_lft; + ifp->prefered_lft = prefered_lft; + ifp->tstamp = now; +-- +2.34.1 + diff --git a/queue-5.15/sch_htb-fail-on-unsupported-parameters-when-offload-.patch b/queue-5.15/sch_htb-fail-on-unsupported-parameters-when-offload-.patch new file mode 100644 index 00000000000..d3c74f50cbc --- /dev/null +++ b/queue-5.15/sch_htb-fail-on-unsupported-parameters-when-offload-.patch @@ -0,0 +1,67 @@ +From 761aef4ff1ce7fbbfcc63ecbf520e99ea5b49287 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 25 Jan 2022 12:06:54 +0200 +Subject: sch_htb: Fail on unsupported parameters when offload is requested + +From: Maxim Mikityanskiy + +[ Upstream commit 429c3be8a5e2695b5b92a6a12361eb89eb185495 ] + +The current implementation of HTB offload doesn't support some +parameters. Instead of ignoring them, actively return the EINVAL error +when they are set to non-defaults. + +As this patch goes to stable, the driver API is not changed here. If +future drivers support more offload parameters, the checks can be moved +to the driver side. + +Note that the buffer and cbuffer parameters are also not supported, but +the tc userspace tool assigns some default values derived from rate and +ceil, and identifying these defaults in sch_htb would be unreliable, so +they are still ignored. 
+ +Fixes: d03b195b5aa0 ("sch_htb: Hierarchical QoS hardware offload") +Reported-by: Jakub Kicinski +Signed-off-by: Maxim Mikityanskiy +Reviewed-by: Tariq Toukan +Link: https://lore.kernel.org/r/20220125100654.424570-1-maximmi@nvidia.com +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + net/sched/sch_htb.c | 20 ++++++++++++++++++++ + 1 file changed, 20 insertions(+) + +diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c +index 5067a6e5d4fde..5cbc32fee8674 100644 +--- a/net/sched/sch_htb.c ++++ b/net/sched/sch_htb.c +@@ -1803,6 +1803,26 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, + if (!hopt->rate.rate || !hopt->ceil.rate) + goto failure; + ++ if (q->offload) { ++ /* Options not supported by the offload. */ ++ if (hopt->rate.overhead || hopt->ceil.overhead) { ++ NL_SET_ERR_MSG(extack, "HTB offload doesn't support the overhead parameter"); ++ goto failure; ++ } ++ if (hopt->rate.mpu || hopt->ceil.mpu) { ++ NL_SET_ERR_MSG(extack, "HTB offload doesn't support the mpu parameter"); ++ goto failure; ++ } ++ if (hopt->quantum) { ++ NL_SET_ERR_MSG(extack, "HTB offload doesn't support the quantum parameter"); ++ goto failure; ++ } ++ if (hopt->prio) { ++ NL_SET_ERR_MSG(extack, "HTB offload doesn't support the prio parameter"); ++ goto failure; ++ } ++ } ++ + /* Keeping backward compatible with rate_table based iproute2 tc */ + if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE) + qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB], +-- +2.34.1 + diff --git a/queue-5.15/series b/queue-5.15/series index 8fc7808ea44..ec27c98b766 100644 --- a/queue-5.15/series +++ b/queue-5.15/series @@ -120,3 +120,42 @@ octeontx2-pf-forward-error-codes-to-vf.patch rxrpc-adjust-retransmission-backoff.patch efi-libstub-arm64-fix-image-check-alignment-at-entry.patch io_uring-fix-bug-in-slow-unregistering-of-nodes.patch +drivers-hv-balloon-account-for-vmbus-packet-header-i.patch +hwmon-lm90-reduce-maximum-conversion-rate-for-g781.patch +hwmon-lm90-re-enable-interrupts-after-alert-clears.patch +hwmon-lm90-mark-alert-as-broken-for-max6654.patch +hwmon-lm90-mark-alert-as-broken-for-max6680.patch +hwmon-lm90-mark-alert-as-broken-for-max6646-6647-664.patch +hwmon-lm90-fix-sysfs-and-udev-notifications.patch +hwmon-adt7470-prevent-divide-by-zero-in-adt7470_fan_.patch +powerpc-perf-fix-power_pmu_disable-to-call-clear_pmi.patch +net-procfs-show-net-devices-bound-packet-types.patch +ipv4-fix-ip-option-filtering-for-locally-generated-f.patch +ibmvnic-allow-extra-failures-before-disabling.patch +ibmvnic-init-running_cap_crqs-early.patch +ibmvnic-don-t-spin-in-tasklet.patch +net-smc-transitional-solution-for-clcsock-race-issue.patch +ping-fix-the-sk_bound_dev_if-match-in-ping_lookup.patch +video-hyperv_fb-fix-validation-of-screen-resolution.patch +can-tcan4x5x-regmap-fix-max-register-value.patch +drm-msm-dsi-fix-missing-put_device-call-in-dsi_get_p.patch +drm-msm-hdmi-fix-missing-put_device-call-in-msm_hdmi.patch +drm-msm-dpu-invalid-parameter-check-in-dpu_setup_dsp.patch +drm-msm-fix-wrong-size-calculation.patch +drm-msm-a6xx-add-missing-suspend_count-increment.patch +drm-msm-dsi-invalid-parameter-check-in-msm_dsi_phy_e.patch +yam-fix-a-memory-leak-in-yam_siocdevprivate.patch +net-cpsw-properly-initialise-struct-page_pool_params.patch +net-hns3-handle-empty-unknown-interrupt-for-vf.patch +sch_htb-fail-on-unsupported-parameters-when-offload-.patch +revert-drm-ast-support-1600x900-with-108mhz-pclk.patch +kvm-selftests-don-t-skip-l2-s-vmcall-in-smm-test-for.patch 
+ceph-put-the-requests-sessions-when-it-fails-to-allo.patch +gve-fix-gfp-flags-when-allocing-pages.patch +revert-ipv6-honor-all-ipv6-pio-valid-lifetime-values.patch +net-bridge-vlan-fix-single-net-device-option-dumping.patch +ipv4-raw-lock-the-socket-in-raw_bind.patch +ipv4-tcp-send-zero-ipid-in-synack-messages.patch +ipv4-avoid-using-shared-ip-generator-for-connected-s.patch +ipv4-remove-sparse-error-in-ip_neigh_gw4.patch +net-bridge-vlan-fix-memory-leak-in-__allowed_ingress.patch diff --git a/queue-5.15/video-hyperv_fb-fix-validation-of-screen-resolution.patch b/queue-5.15/video-hyperv_fb-fix-validation-of-screen-resolution.patch new file mode 100644 index 00000000000..a0af1c3b65d --- /dev/null +++ b/queue-5.15/video-hyperv_fb-fix-validation-of-screen-resolution.patch @@ -0,0 +1,99 @@ +From cd89634912246b987cb45c2b853de5a13b5ffe46 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 16 Jan 2022 11:18:31 -0800 +Subject: video: hyperv_fb: Fix validation of screen resolution + +From: Michael Kelley + +[ Upstream commit 9ff5549b1d1d3c3a9d71220d44bd246586160f1d ] + +In the WIN10 version of the Synthetic Video protocol with Hyper-V, +Hyper-V reports a list of supported resolutions as part of the protocol +negotiation. The driver calculates the maximum width and height from +the list of resolutions, and uses those maximums to validate any screen +resolution specified in the video= option on the kernel boot line. + +This method of validation is incorrect. For example, the list of +supported resolutions could contain 1600x1200 and 1920x1080, both of +which fit in an 8 Mbyte frame buffer. But calculating the max width +and height yields 1920 and 1200, and 1920x1200 resolution does not fit +in an 8 Mbyte frame buffer. Unfortunately, this resolution is accepted, +causing a kernel fault when the driver accesses memory outside the +frame buffer. + +Instead, validate the specified screen resolution by calculating +its size, and comparing against the frame buffer size. Delete the +code for calculating the max width and height from the list of +resolutions, since these max values have no use. Also add the +frame buffer size to the info message to aid in understanding why +a resolution might be rejected. 
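Editorial aside, not part of the upstream commit: the arithmetic behind the examples above. The 8 MiB frame buffer and 32 bits per pixel are assumptions taken from the scenario in the commit message, and fits() only mirrors the shape of the patched check in hvfb_get_option().

    #include <stdio.h>

    static int fits(unsigned long w, unsigned long h,
                    unsigned long depth_bits, unsigned long fb_size)
    {
            return w * h * depth_bits / 8 <= fb_size;
    }

    int main(void)
    {
            unsigned long fb = 8UL * 1024 * 1024;   /* assumed 8 MiB frame buffer */

            printf("1600x1200: %s\n", fits(1600, 1200, 32, fb) ? "fits" : "rejected");
            printf("1920x1080: %s\n", fits(1920, 1080, 32, fb) ? "fits" : "rejected");
            printf("1920x1200: %s\n", fits(1920, 1200, 32, fb) ? "fits" : "rejected");
            return 0;
    }

With these numbers 1600x1200 and 1920x1080 both fit, while 1920x1200 is rejected, which matches the behaviour the commit message describes.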
+ +Fixes: 67e7cdb4829d ("video: hyperv: hyperv_fb: Obtain screen resolution from Hyper-V host") +Signed-off-by: Michael Kelley +Reviewed-by: Haiyang Zhang +Acked-by: Helge Deller +Link: https://lore.kernel.org/r/1642360711-2335-1-git-send-email-mikelley@microsoft.com +Signed-off-by: Wei Liu +Signed-off-by: Sasha Levin +--- + drivers/video/fbdev/hyperv_fb.c | 16 +++------------- + 1 file changed, 3 insertions(+), 13 deletions(-) + +diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c +index 23999df527393..c8e0ea27caf1d 100644 +--- a/drivers/video/fbdev/hyperv_fb.c ++++ b/drivers/video/fbdev/hyperv_fb.c +@@ -287,8 +287,6 @@ struct hvfb_par { + + static uint screen_width = HVFB_WIDTH; + static uint screen_height = HVFB_HEIGHT; +-static uint screen_width_max = HVFB_WIDTH; +-static uint screen_height_max = HVFB_HEIGHT; + static uint screen_depth; + static uint screen_fb_size; + static uint dio_fb_size; /* FB size for deferred IO */ +@@ -582,7 +580,6 @@ static int synthvid_get_supported_resolution(struct hv_device *hdev) + int ret = 0; + unsigned long t; + u8 index; +- int i; + + memset(msg, 0, sizeof(struct synthvid_msg)); + msg->vid_hdr.type = SYNTHVID_RESOLUTION_REQUEST; +@@ -613,13 +610,6 @@ static int synthvid_get_supported_resolution(struct hv_device *hdev) + goto out; + } + +- for (i = 0; i < msg->resolution_resp.resolution_count; i++) { +- screen_width_max = max_t(unsigned int, screen_width_max, +- msg->resolution_resp.supported_resolution[i].width); +- screen_height_max = max_t(unsigned int, screen_height_max, +- msg->resolution_resp.supported_resolution[i].height); +- } +- + screen_width = + msg->resolution_resp.supported_resolution[index].width; + screen_height = +@@ -941,7 +931,7 @@ static void hvfb_get_option(struct fb_info *info) + + if (x < HVFB_WIDTH_MIN || y < HVFB_HEIGHT_MIN || + (synthvid_ver_ge(par->synthvid_version, SYNTHVID_VERSION_WIN10) && +- (x > screen_width_max || y > screen_height_max)) || ++ (x * y * screen_depth / 8 > screen_fb_size)) || + (par->synthvid_version == SYNTHVID_VERSION_WIN8 && + x * y * screen_depth / 8 > SYNTHVID_FB_SIZE_WIN8) || + (par->synthvid_version == SYNTHVID_VERSION_WIN7 && +@@ -1194,8 +1184,8 @@ static int hvfb_probe(struct hv_device *hdev, + } + + hvfb_get_option(info); +- pr_info("Screen resolution: %dx%d, Color depth: %d\n", +- screen_width, screen_height, screen_depth); ++ pr_info("Screen resolution: %dx%d, Color depth: %d, Frame buffer size: %d\n", ++ screen_width, screen_height, screen_depth, screen_fb_size); + + ret = hvfb_getmem(hdev, info); + if (ret) { +-- +2.34.1 + diff --git a/queue-5.15/yam-fix-a-memory-leak-in-yam_siocdevprivate.patch b/queue-5.15/yam-fix-a-memory-leak-in-yam_siocdevprivate.patch new file mode 100644 index 00000000000..b9337e25f62 --- /dev/null +++ b/queue-5.15/yam-fix-a-memory-leak-in-yam_siocdevprivate.patch @@ -0,0 +1,37 @@ +From 447ac696a53c4d9173f78f77e9e947c4087dce75 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 24 Jan 2022 11:29:54 +0800 +Subject: yam: fix a memory leak in yam_siocdevprivate() + +From: Hangyu Hua + +[ Upstream commit 29eb31542787e1019208a2e1047bb7c76c069536 ] + +ym needs to be free when ym->cmd != SIOCYAMSMCS. + +Fixes: 0781168e23a2 ("yam: fix a missing-check bug") +Signed-off-by: Hangyu Hua +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + drivers/net/hamradio/yam.c | 4 +--- + 1 file changed, 1 insertion(+), 3 deletions(-) + +diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c +index 6ddacbdb224ba..528d57a435394 100644 +--- a/drivers/net/hamradio/yam.c ++++ b/drivers/net/hamradio/yam.c +@@ -950,9 +950,7 @@ static int yam_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __ + ym = memdup_user(data, sizeof(struct yamdrv_ioctl_mcs)); + if (IS_ERR(ym)) + return PTR_ERR(ym); +- if (ym->cmd != SIOCYAMSMCS) +- return -EINVAL; +- if (ym->bitrate > YAM_MAXBITRATE) { ++ if (ym->cmd != SIOCYAMSMCS || ym->bitrate > YAM_MAXBITRATE) { + kfree(ym); + return -EINVAL; + } +-- +2.34.1 +