--- /dev/null
+From a4e03921c1bb118e6718e0a3b0322a2c13ed172b Mon Sep 17 00:00:00 2001
+From: Giulio Benetti <giulio.benetti@benettiengineering.com>
+Date: Tue, 13 Dec 2022 20:24:03 +0100
+Subject: ARM: 9280/1: mm: fix warning on phys_addr_t to void pointer assignment
+
+From: Giulio Benetti <giulio.benetti@benettiengineering.com>
+
+commit a4e03921c1bb118e6718e0a3b0322a2c13ed172b upstream.
+
+zero_page is a void* pointer but memblock_alloc() returns phys_addr_t type,
+so this generates a warning when using clang, and with -Wint-error enabled
+the warning becomes an error. So let's cast the return of memblock_alloc()
+to (void *).
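+
+A minimal sketch of the conversion clang complains about (hypothetical
+names, not the kernel code):
+
+	typedef unsigned long long phys_addr_t;
+	phys_addr_t fake_alloc(void);	/* stand-in allocator returning phys_addr_t */
+
+	void *get_zero_page(void)
+	{
+		/* void *p = fake_alloc();     implicit integer-to-pointer
+		 *                             conversion: clang warns */
+		return (void *)fake_alloc();	/* explicit cast: no warning */
+	}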
+
+Cc: <stable@vger.kernel.org> # 4.14.x +
+Fixes: 340a982825f7 ("ARM: 9266/1: mm: fix no-MMU ZERO_PAGE() implementation")
+Signed-off-by: Giulio Benetti <giulio.benetti@benettiengineering.com>
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/mm/nommu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/mm/nommu.c
++++ b/arch/arm/mm/nommu.c
+@@ -161,7 +161,7 @@ void __init paging_init(const struct mac
+ mpu_setup();
+
+ /* allocate the zero page. */
+- zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
++ zero_page = (void *)memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!zero_page)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
--- /dev/null
+From 15b207d0abdcbb2271774aa99d9a290789159e75 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Fri, 20 Jan 2023 11:21:53 +0800
+Subject: drm/amd/pm: add missing AllowIHInterrupt message mapping for SMU13.0.0
+
+From: Evan Quan <evan.quan@amd.com>
+
+commit 15b207d0abdcbb2271774aa99d9a290789159e75 upstream.
+
+Add SMU13.0.0 AllowIHInterrupt message mapping.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Feifei Xu <Feifei.Xu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org # 6.1.x
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+index 4c20d17e7416..cf96c3f2affe 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+@@ -145,6 +145,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
+ MSG_MAP(SetBadMemoryPagesRetiredFlagsPerChannel,
+ PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel, 0),
+ MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0),
++ MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0),
+ };
+
+ static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
+--
+2.39.1
+
--- /dev/null
+From 1119e1f9636b76aef14068c7fd0b4d55132b86b8 Mon Sep 17 00:00:00 2001
+From: Lyude Paul <lyude@redhat.com>
+Date: Wed, 23 Nov 2022 14:50:16 -0500
+Subject: drm/amdgpu/display/mst: Fix mst_state->pbn_div and slot count assignments
+
+From: Lyude Paul <lyude@redhat.com>
+
+commit 1119e1f9636b76aef14068c7fd0b4d55132b86b8 upstream.
+
+Looks like I made a pretty big mistake here without noticing: when I moved
+the assignments of mst_state->pbn_div I completely missed that the reason we
+called drm_dp_mst_update_slots() earlier was that this function needs to be
+called with info from the root MST connector, instead of just from each MST
+encoder's atomic check function. Otherwise, we end up filling out all of
+DC's link information with zeroes.
+
+So, let's restore that and hopefully fix this DSC regression.
+
+Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/2171
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Fixes: 4d07b0bc4034 ("drm/display/dp_mst: Move all payload info into the atomic state")
+Cc: stable@vger.kernel.org # 6.1
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Tested-by: Didier Raboud <odyx@debian.org>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 24 ++++++++++++
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 5 --
+ 2 files changed, 24 insertions(+), 5 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -9393,6 +9393,8 @@ static int amdgpu_dm_atomic_check(struct
+ struct drm_connector_state *old_con_state, *new_con_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
++ struct drm_dp_mst_topology_mgr *mgr;
++ struct drm_dp_mst_topology_state *mst_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ enum dc_status status;
+@@ -9648,6 +9650,28 @@ static int amdgpu_dm_atomic_check(struct
+ lock_and_validation_needed = true;
+ }
+
++#if defined(CONFIG_DRM_AMD_DC_DCN)
++ /* set the slot info for each mst_state based on the link encoding format */
++ for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
++ struct amdgpu_dm_connector *aconnector;
++ struct drm_connector *connector;
++ struct drm_connector_list_iter iter;
++ u8 link_coding_cap;
++
++ drm_connector_list_iter_begin(dev, &iter);
++ drm_for_each_connector_iter(connector, &iter) {
++ if (connector->index == mst_state->mgr->conn_base_id) {
++ aconnector = to_amdgpu_dm_connector(connector);
++ link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
++ drm_dp_mst_update_slots(mst_state, link_coding_cap);
++
++ break;
++ }
++ }
++ drm_connector_list_iter_end(&iter);
++ }
++#endif
++
+ /**
+ * Streams and planes are reset when there are changes that affect
+ * bandwidth. Anything that affects bandwidth needs to go through
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -896,11 +896,6 @@ static int compute_mst_dsc_configs_for_l
+ if (IS_ERR(mst_state))
+ return PTR_ERR(mst_state);
+
+- mst_state->pbn_div = dm_mst_get_pbn_divider(dc_link);
+-#if defined(CONFIG_DRM_AMD_DC_DCN)
+- drm_dp_mst_update_slots(mst_state, dc_link_dp_mst_decide_link_encoding_format(dc_link));
+-#endif
+-
+ /* Set up params */
+ for (i = 0; i < dc_state->stream_count; i++) {
+ struct dc_dsc_policy dsc_policy = {0};
--- /dev/null
+From cb1e0b015f56b8f3c7f5ce33ff4b782ee5674512 Mon Sep 17 00:00:00 2001
+From: Wayne Lin <Wayne.Lin@amd.com>
+Date: Fri, 9 Dec 2022 19:05:33 +0800
+Subject: drm/amdgpu/display/mst: limit payload to be updated one by one
+
+From: Wayne Lin <Wayne.Lin@amd.com>
+
+commit cb1e0b015f56b8f3c7f5ce33ff4b782ee5674512 upstream.
+
+[Why]
+amdgpu expects to update the payload table for one stream at a time
+by calling dm_helpers_dp_mst_write_payload_allocation_table().
+Currently, it has been modified to try to update the HW payload table
+all at once by referring to mst_state.
+
+[How]
+This is just a quick workaround. We should find a way to remove the
+temporary struct dc_dp_mst_stream_allocation_table later, if setting
+struct link_mst_stream_allocation_table directly is possible.
+
+Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/2171
+Signed-off-by: Wayne Lin <Wayne.Lin@amd.com>
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Fixes: 4d07b0bc4034 ("drm/display/dp_mst: Move all payload info into the atomic state")
+Cc: stable@vger.kernel.org # 6.1
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Lyude Paul <lyude@redhat.com>
+Tested-by: Didier Raboud <odyx@debian.org>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 51 ++++++++++----
+ 1 file changed, 39 insertions(+), 12 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+@@ -120,23 +120,50 @@ enum dc_edid_status dm_helpers_parse_edi
+ }
+
+ static void
+-fill_dc_mst_payload_table_from_drm(struct drm_dp_mst_topology_state *mst_state,
+- struct amdgpu_dm_connector *aconnector,
++fill_dc_mst_payload_table_from_drm(struct dc_link *link,
++ bool enable,
++ struct drm_dp_mst_atomic_payload *target_payload,
+ struct dc_dp_mst_stream_allocation_table *table)
+ {
+ struct dc_dp_mst_stream_allocation_table new_table = { 0 };
+ struct dc_dp_mst_stream_allocation *sa;
+- struct drm_dp_mst_atomic_payload *payload;
++ struct link_mst_stream_allocation_table copy_of_link_table =
++ link->mst_stream_alloc_table;
++
++ int i;
++ int current_hw_table_stream_cnt = copy_of_link_table.stream_count;
++ struct link_mst_stream_allocation *dc_alloc;
++
++ /* TODO: refactor to set link->mst_stream_alloc_table directly if possible.*/
++ if (enable) {
++ dc_alloc =
++			&copy_of_link_table.stream_allocations[current_hw_table_stream_cnt];
++ dc_alloc->vcp_id = target_payload->vcpi;
++ dc_alloc->slot_count = target_payload->time_slots;
++ } else {
++ for (i = 0; i < copy_of_link_table.stream_count; i++) {
++ dc_alloc =
++				&copy_of_link_table.stream_allocations[i];
++
++ if (dc_alloc->vcp_id == target_payload->vcpi) {
++ dc_alloc->vcp_id = 0;
++ dc_alloc->slot_count = 0;
++ break;
++ }
++ }
++ ASSERT(i != copy_of_link_table.stream_count);
++ }
+
+ /* Fill payload info*/
+- list_for_each_entry(payload, &mst_state->payloads, next) {
+- if (payload->delete)
+- continue;
+-
+- sa = &new_table.stream_allocations[new_table.stream_count];
+- sa->slot_count = payload->time_slots;
+- sa->vcp_id = payload->vcpi;
+- new_table.stream_count++;
++ for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
++ dc_alloc =
++			&copy_of_link_table.stream_allocations[i];
++ if (dc_alloc->vcp_id > 0 && dc_alloc->slot_count > 0) {
++ sa = &new_table.stream_allocations[new_table.stream_count];
++ sa->slot_count = dc_alloc->slot_count;
++ sa->vcp_id = dc_alloc->vcp_id;
++ new_table.stream_count++;
++ }
+ }
+
+ /* Overwrite the old table */
+@@ -185,7 +212,7 @@ bool dm_helpers_dp_mst_write_payload_all
+ * AUX message. The sequence is slot 1-63 allocated sequence for each
+ * stream. AMD ASIC stream slot allocation should follow the same
+ * sequence. copy DRM MST allocation to dc */
+- fill_dc_mst_payload_table_from_drm(mst_state, aconnector, proposed_table);
++ fill_dc_mst_payload_table_from_drm(stream->link, enable, payload, proposed_table);
+
+ return true;
+ }
--- /dev/null
+From f85c5e25fd28fe0bf6d6d0563cf83758a4e05c8f Mon Sep 17 00:00:00 2001
+From: Wayne Lin <Wayne.Lin@amd.com>
+Date: Mon, 12 Dec 2022 15:41:18 +0800
+Subject: drm/amdgpu/display/mst: update mst_mgr relevant variable when long HPD
+
+From: Wayne Lin <Wayne.Lin@amd.com>
+
+commit f85c5e25fd28fe0bf6d6d0563cf83758a4e05c8f upstream.
+
+[Why & How]
+Now the vc_start_slot is controlled at the drm side. When we
+service a long HPD, we still need to run
+dm_helpers_dp_mst_write_payload_allocation_table() to update
+the drm mst_mgr's relevant variables. Otherwise, on the next plug-in,
+the payload will get assigned a wrong start slot.
+
+Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/2171
+Signed-off-by: Wayne Lin <Wayne.Lin@amd.com>
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Fixes: 4d07b0bc4034 ("drm/display/dp_mst: Move all payload info into the atomic state")
+Cc: stable@vger.kernel.org # 6.1
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Lyude Paul <lyude@redhat.com>
+Tested-by: Didier Raboud <odyx@debian.org>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 342e906ae26e..c88f044666fe 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -3995,10 +3995,13 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
+ struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0);
+ int i;
+ bool mst_mode = (link->type == dc_connection_mst_branch);
++ /* adjust for drm changes*/
++ bool update_drm_mst_state = true;
+ const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+ const struct dc_link_settings empty_link_settings = {0};
+ DC_LOGGER_INIT(link->ctx->logger);
+
++
+ /* deallocate_mst_payload is called before disable link. When mode or
+ * disable/enable monitor, new stream is created which is not in link
+ * stream[] yet. For this, payload is not allocated yet, so de-alloc
+@@ -4014,7 +4017,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
+ &empty_link_settings,
+ avg_time_slots_per_mtp);
+
+- if (mst_mode) {
++ if (mst_mode || update_drm_mst_state) {
+ /* when link is in mst mode, reply on mst manager to remove
+ * payload
+ */
+@@ -4077,11 +4080,18 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
+ stream->ctx,
+ stream);
+
++ if (!update_drm_mst_state)
++ dm_helpers_dp_mst_send_payload_allocation(
++ stream->ctx,
++ stream,
++ false);
++ }
++
++ if (update_drm_mst_state)
+ dm_helpers_dp_mst_send_payload_allocation(
+ stream->ctx,
+ stream,
+ false);
+- }
+
+ return DC_OK;
+ }
+--
+2.39.1
+
--- /dev/null
+From 2de3769830346e68b3de0f4abc0d8e2625ad9dac Mon Sep 17 00:00:00 2001
+From: Jonathan Kim <jonathan.kim@amd.com>
+Date: Thu, 19 Jan 2023 18:42:03 -0500
+Subject: drm/amdgpu: remove unconditional trap enable on add gfx11 queues
+
+From: Jonathan Kim <jonathan.kim@amd.com>
+
+commit 2de3769830346e68b3de0f4abc0d8e2625ad9dac upstream.
+
+A rebase of the driver incorrectly enabled traps unconditionally
+for GFX11 when adding MES queues.
+
+Reported-by: Graham Sider <graham.sider@amd.com>
+Signed-off-by: Jonathan Kim <jonathan.kim@amd.com>
+Reviewed-by: Graham Sider <graham.sider@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org # 6.1.x
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+@@ -192,7 +192,6 @@ static int mes_v11_0_add_hw_queue(struct
+ mes_add_queue_pkt.trap_handler_addr = input->tba_addr;
+ mes_add_queue_pkt.tma_addr = input->tma_addr;
+ mes_add_queue_pkt.is_kfd_process = input->is_kfd_process;
+- mes_add_queue_pkt.trap_en = 1;
+
+ /* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */
+ mes_add_queue_pkt.is_aql_queue = input->is_aql_queue;
--- /dev/null
+From d8bf2df715bb8ac964f91fe8bf67c37c5d916463 Mon Sep 17 00:00:00 2001
+From: Wayne Lin <Wayne.Lin@amd.com>
+Date: Wed, 28 Dec 2022 14:50:43 +0800
+Subject: drm/display/dp_mst: Correct the kref of port.
+
+From: Wayne Lin <Wayne.Lin@amd.com>
+
+commit d8bf2df715bb8ac964f91fe8bf67c37c5d916463 upstream.
+
+[why & how]
+We still need to refer to the port while removing the payload at
+commit_tail, so we should keep the kref until then to release it.
+
+Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/2171
+Signed-off-by: Wayne Lin <Wayne.Lin@amd.com>
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Fixes: 4d07b0bc4034 ("drm/display/dp_mst: Move all payload info into the atomic state")
+Cc: stable@vger.kernel.org # 6.1
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Lyude Paul <lyude@redhat.com>
+Tested-by: Didier Raboud <odyx@debian.org>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/display/drm_dp_mst_topology.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+index 51a46689cda7..4ca37261584a 100644
+--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+@@ -3372,6 +3372,9 @@ void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
+
+ mgr->payload_count--;
+ mgr->next_start_slot -= payload->time_slots;
++
++ if (payload->delete)
++ drm_dp_mst_put_port_malloc(payload->port);
+ }
+ EXPORT_SYMBOL(drm_dp_remove_payload);
+
+@@ -4327,7 +4330,6 @@ int drm_dp_atomic_release_time_slots(struct drm_atomic_state *state,
+
+ drm_dbg_atomic(mgr->dev, "[MST PORT:%p] TU %d -> 0\n", port, payload->time_slots);
+ if (!payload->delete) {
+- drm_dp_mst_put_port_malloc(port);
+ payload->pbn = 0;
+ payload->delete = true;
+ topology_state->payload_mask &= ~BIT(payload->vcpi - 1);
+--
+2.39.1
+
--- /dev/null
+From cec669ff716cc83505c77b242aecf6f7baad869d Mon Sep 17 00:00:00 2001
+From: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Date: Wed, 18 Jan 2023 20:38:48 +0530
+Subject: EDAC/device: Respect any driver-supplied workqueue polling value
+
+From: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+commit cec669ff716cc83505c77b242aecf6f7baad869d upstream.
+
+The EDAC drivers may optionally pass the poll_msec value. Use that value
+if available, else fall back to 1000ms.
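+
+The new setup call below relies on the GNU C conditional with an omitted
+middle operand, where "a ?: b" evaluates to a when a is non-zero, else b
+(a sketch):
+
+	static unsigned int pick_interval(unsigned int poll_msec)
+	{
+		/* 0 -> 1000 (default), 3000 -> 3000 (driver-supplied) */
+		return poll_msec ?: 1000;
+	}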
+
+ [ bp: Touchups. ]
+
+Fixes: e27e3dac6517 ("drivers/edac: add edac_device class")
+Reported-by: Luca Weiss <luca.weiss@fairphone.com>
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Tested-by: Steev Klimaszewski <steev@kali.org> # Thinkpad X13s
+Tested-by: Andrew Halaney <ahalaney@redhat.com> # sa8540p-ride
+Cc: <stable@vger.kernel.org> # 4.9
+Link: https://lore.kernel.org/r/COZYL8MWN97H.MROQ391BGA09@otso
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/edac/edac_device.c | 15 +++++++--------
+ 1 file changed, 7 insertions(+), 8 deletions(-)
+
+--- a/drivers/edac/edac_device.c
++++ b/drivers/edac/edac_device.c
+@@ -34,6 +34,9 @@
+ static DEFINE_MUTEX(device_ctls_mutex);
+ static LIST_HEAD(edac_device_list);
+
++/* Default workqueue processing interval on this instance, in msecs */
++#define DEFAULT_POLL_INTERVAL 1000
++
+ #ifdef CONFIG_EDAC_DEBUG
+ static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
+ {
+@@ -336,7 +339,7 @@ static void edac_device_workq_function(s
+ * whole one second to save timers firing all over the period
+ * between integral seconds
+ */
+- if (edac_dev->poll_msec == 1000)
++ if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
+ edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
+ else
+ edac_queue_work(&edac_dev->work, edac_dev->delay);
+@@ -366,7 +369,7 @@ static void edac_device_workq_setup(stru
+ * timers firing on sub-second basis, while they are happy
+ * to fire together on the 1 second exactly
+ */
+- if (edac_dev->poll_msec == 1000)
++ if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
+ edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
+ else
+ edac_queue_work(&edac_dev->work, edac_dev->delay);
+@@ -400,7 +403,7 @@ void edac_device_reset_delay_period(stru
+ edac_dev->delay = msecs_to_jiffies(msec);
+
+ /* See comment in edac_device_workq_setup() above */
+- if (edac_dev->poll_msec == 1000)
++ if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
+ edac_mod_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
+ else
+ edac_mod_work(&edac_dev->work, edac_dev->delay);
+@@ -442,11 +445,7 @@ int edac_device_add_device(struct edac_d
+ /* This instance is NOW RUNNING */
+ edac_dev->op_state = OP_RUNNING_POLL;
+
+- /*
+- * enable workq processing on this instance,
+- * default = 1000 msec
+- */
+- edac_device_workq_setup(edac_dev, 1000);
++ edac_device_workq_setup(edac_dev, edac_dev->poll_msec ?: DEFAULT_POLL_INTERVAL);
+ } else {
+ edac_dev->op_state = OP_RUNNING_INTERRUPT;
+ }
--- /dev/null
+From 977c6ba624f24ae20cf0faee871257a39348d4a9 Mon Sep 17 00:00:00 2001
+From: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Date: Wed, 18 Jan 2023 20:38:50 +0530
+Subject: EDAC/qcom: Do not pass llcc_driv_data as edac_device_ctl_info's pvt_info
+
+From: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+commit 977c6ba624f24ae20cf0faee871257a39348d4a9 upstream.
+
+The memory for llcc_driv_data is allocated by the LLCC driver. But when
+it is passed as the private driver info to the EDAC core, it will get freed
+during the qcom_edac driver release. So when the qcom_edac driver gets probed
+again, it will try to use the freed data, leading to a use-after-free bug.
+
+Hence, do not pass llcc_driv_data as pvt_info but rather reference it
+using the platform_data pointer in the qcom_edac driver.
+
+Fixes: 27450653f1db ("drivers: edac: Add EDAC driver support for QCOM SoCs")
+Reported-by: Steev Klimaszewski <steev@kali.org>
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Tested-by: Steev Klimaszewski <steev@kali.org> # Thinkpad X13s
+Tested-by: Andrew Halaney <ahalaney@redhat.com> # sa8540p-ride
+Cc: <stable@vger.kernel.org> # 4.20
+Link: https://lore.kernel.org/r/20230118150904.26913-4-manivannan.sadhasivam@linaro.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/edac/qcom_edac.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/drivers/edac/qcom_edac.c
++++ b/drivers/edac/qcom_edac.c
+@@ -252,7 +252,7 @@ clear:
+ static int
+ dump_syn_reg(struct edac_device_ctl_info *edev_ctl, int err_type, u32 bank)
+ {
+- struct llcc_drv_data *drv = edev_ctl->pvt_info;
++ struct llcc_drv_data *drv = edev_ctl->dev->platform_data;
+ int ret;
+
+ ret = dump_syn_reg_values(drv, bank, err_type);
+@@ -289,7 +289,7 @@ static irqreturn_t
+ llcc_ecc_irq_handler(int irq, void *edev_ctl)
+ {
+ struct edac_device_ctl_info *edac_dev_ctl = edev_ctl;
+- struct llcc_drv_data *drv = edac_dev_ctl->pvt_info;
++ struct llcc_drv_data *drv = edac_dev_ctl->dev->platform_data;
+ irqreturn_t irq_rc = IRQ_NONE;
+ u32 drp_error, trp_error, i;
+ int ret;
+@@ -358,7 +358,6 @@ static int qcom_llcc_edac_probe(struct p
+ edev_ctl->dev_name = dev_name(dev);
+ edev_ctl->ctl_name = "llcc";
+ edev_ctl->panic_on_ue = LLCC_ERP_PANIC_ON_UE;
+- edev_ctl->pvt_info = llcc_driv_data;
+
+ rc = edac_device_add_device(edev_ctl);
+ if (rc)
--- /dev/null
+From b00c51ef8f72ced0965d021a291b98ff822c5337 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Sun, 22 Jan 2023 10:02:55 -0700
+Subject: io_uring/net: cache provided buffer group value for multishot receives
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit b00c51ef8f72ced0965d021a291b98ff822c5337 upstream.
+
+If we're using ring provided buffers with multishot receive, and we end
+up doing an io-wq based issue at some point that also needs to select
+a buffer, we'll lose the initially assigned buffer group as
+io_ring_buffer_select() correctly clears the buffer group list as the
+issue isn't serialized by the ctx uring_lock. This is fine for normal
+receives as the request puts the buffer and finishes, but for multishot,
+we will re-arm and do further receives. On the next trigger for this
+multishot receive, the receive will try to pick from a buffer group
+whose value is the same as the buffer ID of the last receive. That is
+obviously incorrect, and will result in a premature -ENOBUFS error for
+the receive even if we had available buffers in the correct group.
+
+Cache the buffer group value at prep time, so we can restore it for
+future receives. This only needs doing for the above mentioned case, but
+just do it by default to keep it easier to read.
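+
+Distilled to its core, the save/restore pattern is (a toy sketch with
+simplified types, not the kernel code):
+
+	struct sr_msg  { unsigned short buf_group; };
+	struct request { unsigned short buf_index; };
+
+	/* at prep time: remember the group the request was submitted with */
+	static void prep(struct sr_msg *sr, struct request *req)
+	{
+		sr->buf_group = req->buf_index;
+	}
+
+	/* at multishot retry: req->buf_index may now hold a buffer ID from
+	 * the previous selection, so restore the saved group first */
+	static void prep_retry(struct sr_msg *sr, struct request *req)
+	{
+		req->buf_index = sr->buf_group;
+	}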
+
+Cc: stable@vger.kernel.org
+Fixes: b3fdea6ecb55 ("io_uring: multishot recv")
+Fixes: 9bb66906f23e ("io_uring: support multishot in recvmsg")
+Cc: Dylan Yudaken <dylany@meta.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/net.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -62,6 +62,7 @@ struct io_sr_msg {
+ u16 flags;
+ /* initialised and used only by !msg send variants */
+ u16 addr_len;
++ u16 buf_group;
+ void __user *addr;
+ /* used only for send zerocopy */
+ struct io_kiocb *notif;
+@@ -565,6 +566,15 @@ int io_recvmsg_prep(struct io_kiocb *req
+ if (req->opcode == IORING_OP_RECV && sr->len)
+ return -EINVAL;
+ req->flags |= REQ_F_APOLL_MULTISHOT;
++ /*
++ * Store the buffer group for this multishot receive separately,
++ * as if we end up doing an io-wq based issue that selects a
++ * buffer, it has to be committed immediately and that will
++ * clear ->buf_list. This means we lose the link to the buffer
++ * list, and the eventual buffer put on completion then cannot
++ * restore it.
++ */
++ sr->buf_group = req->buf_index;
+ }
+
+ #ifdef CONFIG_COMPAT
+@@ -581,6 +591,7 @@ static inline void io_recv_prep_retry(st
+
+ sr->done_io = 0;
+ sr->len = 0; /* get from the provided buffer */
++ req->buf_index = sr->buf_group;
+ }
+
+ /*
--- /dev/null
+From 9f535c870e493841ac7be390610ff2edec755762 Mon Sep 17 00:00:00 2001
+From: Gergely Risko <gergely.risko@gmail.com>
+Date: Thu, 19 Jan 2023 14:40:41 +0100
+Subject: ipv6: fix reachability confirmation with proxy_ndp
+
+From: Gergely Risko <gergely.risko@gmail.com>
+
+commit 9f535c870e493841ac7be390610ff2edec755762 upstream.
+
+When proxying IPv6 NDP requests, the adverts to the initial multicast
+solicits are correct and working. On the other hand, when later a
+reachability confirmation is requested (on unicast), no reply is sent.
+
+This causes the neighbor entry to expire on the sending node, which is
+mostly a non-issue, as a new multicast request is sent. There are
+routers where the multicast requests are intentionally delayed, and in
+these environments the current implementation causes periodic packet
+loss for the proxied endpoints.
+
+The root cause is the erroneous decrease of the hop limit, as this
+is checked in ndisc.c and no answer is generated when it's 254 instead
+of the correct 255.
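+
+For reference, RFC 4861 requires neighbour discovery packets to arrive
+with a hop limit of 255, proving they did not cross a router; the
+receive-side validation in ndisc is roughly of this shape (a simplified
+sketch, not the exact kernel code):
+
+	if (ipv6_hdr(skb)->hop_limit != 255)
+		return;		/* crossed a router: drop, send no advert */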
+
+Cc: stable@vger.kernel.org
+Fixes: 46c7655f0b56 ("ipv6: decrease hop limit counter in ip6_forward()")
+Signed-off-by: Gergely Risko <gergely.risko@gmail.com>
+Tested-by: Gergely Risko <gergely.risko@gmail.com>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/ip6_output.c | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -547,7 +547,20 @@ int ip6_forward(struct sk_buff *skb)
+ pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
+ int proxied = ip6_forward_proxy_check(skb);
+ if (proxied > 0) {
+- hdr->hop_limit--;
++ /* It's tempting to decrease the hop limit
++ * here by 1, as we do at the end of the
++ * function too.
++ *
++ * But that would be incorrect, as proxying is
++ * not forwarding. The ip6_input function
++ * will handle this packet locally, and it
++ * depends on the hop limit being unchanged.
++ *
++ * One example is the NDP hop limit, that
++ * always has to stay 255, but other would be
++ * similar checks around RA packets, where the
++ * user can even change the desired limit.
++ */
+ return ip6_input(skb);
+ } else if (proxied < 0) {
+ __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
--- /dev/null
+From 0d0d4680db22eda1eea785c47bbf66a9b33a8b16 Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Thu, 29 Dec 2022 18:33:25 +0900
+Subject: ksmbd: add max connections parameter
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit 0d0d4680db22eda1eea785c47bbf66a9b33a8b16 upstream.
+
+Add a max connections parameter to limit the maximum number of
+simultaneous connections.
+
+Fixes: 0626e6641f6b ("cifsd: add server handler for central processing and tranport layers")
+Cc: stable@vger.kernel.org
+Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ksmbd/ksmbd_netlink.h | 3 ++-
+ fs/ksmbd/server.h | 1 +
+ fs/ksmbd/transport_ipc.c | 3 +++
+ fs/ksmbd/transport_tcp.c | 17 ++++++++++++++++-
+ 4 files changed, 22 insertions(+), 2 deletions(-)
+
+--- a/fs/ksmbd/ksmbd_netlink.h
++++ b/fs/ksmbd/ksmbd_netlink.h
+@@ -105,7 +105,8 @@ struct ksmbd_startup_request {
+ __u32 sub_auth[3]; /* Subauth value for Security ID */
+ __u32 smb2_max_credits; /* MAX credits */
+ __u32 smbd_max_io_size; /* smbd read write size */
+- __u32 reserved[127]; /* Reserved room */
++ __u32 max_connections; /* Number of maximum simultaneous connections */
++ __u32 reserved[126]; /* Reserved room */
+ __u32 ifc_list_sz; /* interfaces list size */
+ __s8 ____payload[];
+ };
+--- a/fs/ksmbd/server.h
++++ b/fs/ksmbd/server.h
+@@ -41,6 +41,7 @@ struct ksmbd_server_config {
+ unsigned int share_fake_fscaps;
+ struct smb_sid domain_sid;
+ unsigned int auth_mechs;
++ unsigned int max_connections;
+
+ char *conf[SERVER_CONF_WORK_GROUP + 1];
+ };
+--- a/fs/ksmbd/transport_ipc.c
++++ b/fs/ksmbd/transport_ipc.c
+@@ -308,6 +308,9 @@ static int ipc_server_config_on_startup(
+ if (req->smbd_max_io_size)
+ init_smbd_max_io_size(req->smbd_max_io_size);
+
++ if (req->max_connections)
++ server_conf.max_connections = req->max_connections;
++
+ ret = ksmbd_set_netbios_name(req->netbios_name);
+ ret |= ksmbd_set_server_string(req->server_string);
+ ret |= ksmbd_set_work_group(req->work_group);
+--- a/fs/ksmbd/transport_tcp.c
++++ b/fs/ksmbd/transport_tcp.c
+@@ -15,6 +15,8 @@
+ #define IFACE_STATE_DOWN BIT(0)
+ #define IFACE_STATE_CONFIGURED BIT(1)
+
++static atomic_t active_num_conn;
++
+ struct interface {
+ struct task_struct *ksmbd_kthread;
+ struct socket *ksmbd_socket;
+@@ -185,8 +187,10 @@ static int ksmbd_tcp_new_connection(stru
+ struct tcp_transport *t;
+
+ t = alloc_transport(client_sk);
+- if (!t)
++ if (!t) {
++ sock_release(client_sk);
+ return -ENOMEM;
++ }
+
+ csin = KSMBD_TCP_PEER_SOCKADDR(KSMBD_TRANS(t)->conn);
+ if (kernel_getpeername(client_sk, csin) < 0) {
+@@ -239,6 +243,15 @@ static int ksmbd_kthread_fn(void *p)
+ continue;
+ }
+
++ if (server_conf.max_connections &&
++ atomic_inc_return(&active_num_conn) >= server_conf.max_connections) {
++ pr_info_ratelimited("Limit the maximum number of connections(%u)\n",
++ atomic_read(&active_num_conn));
++ atomic_dec(&active_num_conn);
++ sock_release(client_sk);
++ continue;
++ }
++
+ ksmbd_debug(CONN, "connect success: accepted new connection\n");
+ client_sk->sk->sk_rcvtimeo = KSMBD_TCP_RECV_TIMEOUT;
+ client_sk->sk->sk_sndtimeo = KSMBD_TCP_SEND_TIMEOUT;
+@@ -368,6 +381,8 @@ static int ksmbd_tcp_writev(struct ksmbd
+ static void ksmbd_tcp_disconnect(struct ksmbd_transport *t)
+ {
+ free_transport(TCP_TRANS(t));
++ if (server_conf.max_connections)
++ atomic_dec(&active_num_conn);
+ }
+
+ static void tcp_destroy_socket(struct socket *ksmbd_socket)
--- /dev/null
+From 5fde3c21cf33830eda7bfd006dc7f4bf07ec9fe6 Mon Sep 17 00:00:00 2001
+From: Marios Makassikis <mmakassikis@freebox.fr>
+Date: Wed, 11 Jan 2023 17:39:02 +0100
+Subject: ksmbd: do not sign response to session request for guest login
+
+From: Marios Makassikis <mmakassikis@freebox.fr>
+
+commit 5fde3c21cf33830eda7bfd006dc7f4bf07ec9fe6 upstream.
+
+If ksmbd.mountd is configured to assign unknown users to the guest account
+("map to guest = bad user" in the config), ksmbd signs the response.
+
+This is wrong according to MS-SMB2 3.3.5.5.3:
+ 12. If the SMB2_SESSION_FLAG_IS_GUEST bit is not set in the SessionFlags
+ field, and Session.IsAnonymous is FALSE, the server MUST sign the
+ final session setup response before sending it to the client, as
+ follows:
+ [...]
+
+This fixes libsmb2 based applications failing to establish a session
+("Wrong signature in received").
+
+Fixes: e2f34481b24d ("cifsd: add server-side procedures for SMB3")
+Cc: stable@vger.kernel.org
+Signed-off-by: Marios Makassikis <mmakassikis@freebox.fr>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ksmbd/smb2pdu.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -8657,6 +8657,7 @@ int smb3_decrypt_req(struct ksmbd_work *
+ bool smb3_11_final_sess_setup_resp(struct ksmbd_work *work)
+ {
+ struct ksmbd_conn *conn = work->conn;
++ struct ksmbd_session *sess = work->sess;
+ struct smb2_hdr *rsp = smb2_get_msg(work->response_buf);
+
+ if (conn->dialect < SMB30_PROT_ID)
+@@ -8666,6 +8667,7 @@ bool smb3_11_final_sess_setup_resp(struc
+ rsp = ksmbd_resp_buf_next(work);
+
+ if (le16_to_cpu(rsp->Command) == SMB2_SESSION_SETUP_HE &&
++ sess->user && !user_guest(sess->user) &&
+ rsp->Status == STATUS_SUCCESS)
+ return true;
+ return false;
--- /dev/null
+From a34dc4a9b9e2fb3a45c179a60bb0b26539c96189 Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Wed, 25 Jan 2023 00:09:02 +0900
+Subject: ksmbd: downgrade ndr version error message to debug
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit a34dc4a9b9e2fb3a45c179a60bb0b26539c96189 upstream.
+
+When a user switches from samba to ksmbd, the following message flood
+appears when accessing files. Samba seems to change the dos attribute
+version to v5. This patch downgrades the ndr version error message to
+debug level.
+
+$ dmesg
+...
+[68971.766914] ksmbd: v5 version is not supported
+[68971.779808] ksmbd: v5 version is not supported
+[68971.871544] ksmbd: v5 version is not supported
+[68971.910135] ksmbd: v5 version is not supported
+...
+
+Cc: stable@vger.kernel.org
+Fixes: e2f34481b24d ("cifsd: add server-side procedures for SMB3")
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ksmbd/ndr.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/fs/ksmbd/ndr.c
++++ b/fs/ksmbd/ndr.c
+@@ -242,7 +242,7 @@ int ndr_decode_dos_attr(struct ndr *n, s
+ return ret;
+
+ if (da->version != 3 && da->version != 4) {
+- pr_err("v%d version is not supported\n", da->version);
++ ksmbd_debug(VFS, "v%d version is not supported\n", da->version);
+ return -EINVAL;
+ }
+
+@@ -251,7 +251,7 @@ int ndr_decode_dos_attr(struct ndr *n, s
+ return ret;
+
+ if (da->version != version2) {
+- pr_err("ndr version mismatched(version: %d, version2: %d)\n",
++ ksmbd_debug(VFS, "ndr version mismatched(version: %d, version2: %d)\n",
+ da->version, version2);
+ return -EINVAL;
+ }
+@@ -457,7 +457,7 @@ int ndr_decode_v4_ntacl(struct ndr *n, s
+ if (ret)
+ return ret;
+ if (acl->version != 4) {
+- pr_err("v%d version is not supported\n", acl->version);
++ ksmbd_debug(VFS, "v%d version is not supported\n", acl->version);
+ return -EINVAL;
+ }
+
+@@ -465,7 +465,7 @@ int ndr_decode_v4_ntacl(struct ndr *n, s
+ if (ret)
+ return ret;
+ if (acl->version != version2) {
+- pr_err("ndr version mismatched(version: %d, version2: %d)\n",
++ ksmbd_debug(VFS, "ndr version mismatched(version: %d, version2: %d)\n",
+ acl->version, version2);
+ return -EINVAL;
+ }
--- /dev/null
+From 62c487b53a7ff31e322cf2874d3796b8202c54a5 Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Wed, 25 Jan 2023 00:13:20 +0900
+Subject: ksmbd: limit pdu length size according to connection status
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit 62c487b53a7ff31e322cf2874d3796b8202c54a5 upstream.
+
+The stream protocol length will never be larger than 16KB until session
+setup. After session setup, the size of requests will not be larger than
+16KB + SMB2 MAX WRITE size. This patch rejects such invalidly oversized
+requests and closes the connection immediately.
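+
+Plugging in the constants from this patch (a worked sketch; the 8MB write
+size is just an example value):
+
+	#define SMB3_MAX_MSGSIZE	(4 * 4096)	/* 16384 bytes = 16KB */
+
+	/* before session setup:  max PDU = 16384 bytes
+	 * after session setup:   max PDU = 16384 + max_write_size,
+	 * e.g. with an 8MB max write size: 16384 + 8388608 = 8404992 bytes
+	 */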
+
+Fixes: 0626e6641f6b ("cifsd: add server handler for central processing and tranport layers")
+Cc: stable@vger.kernel.org
+Reported-by: zdi-disclosures@trendmicro.com # ZDI-CAN-18259
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ksmbd/connection.c | 17 +++++++++++++++--
+ fs/ksmbd/smb2pdu.h | 5 +++--
+ 2 files changed, 18 insertions(+), 4 deletions(-)
+
+--- a/fs/ksmbd/connection.c
++++ b/fs/ksmbd/connection.c
+@@ -280,7 +280,7 @@ int ksmbd_conn_handler_loop(void *p)
+ {
+ struct ksmbd_conn *conn = (struct ksmbd_conn *)p;
+ struct ksmbd_transport *t = conn->transport;
+- unsigned int pdu_size;
++ unsigned int pdu_size, max_allowed_pdu_size;
+ char hdr_buf[4] = {0,};
+ int size;
+
+@@ -305,13 +305,26 @@ int ksmbd_conn_handler_loop(void *p)
+ pdu_size = get_rfc1002_len(hdr_buf);
+ ksmbd_debug(CONN, "RFC1002 header %u bytes\n", pdu_size);
+
++ if (conn->status == KSMBD_SESS_GOOD)
++ max_allowed_pdu_size =
++ SMB3_MAX_MSGSIZE + conn->vals->max_write_size;
++ else
++ max_allowed_pdu_size = SMB3_MAX_MSGSIZE;
++
++ if (pdu_size > max_allowed_pdu_size) {
++ pr_err_ratelimited("PDU length(%u) excceed maximum allowed pdu size(%u) on connection(%d)\n",
++ pdu_size, max_allowed_pdu_size,
++ conn->status);
++ break;
++ }
++
+ /*
+ * Check if pdu size is valid (min : smb header size,
+ * max : 0x00FFFFFF).
+ */
+ if (pdu_size < __SMB2_HEADER_STRUCTURE_SIZE ||
+ pdu_size > MAX_STREAM_PROT_LEN) {
+- continue;
++ break;
+ }
+
+ /* 4 for rfc1002 length field */
+--- a/fs/ksmbd/smb2pdu.h
++++ b/fs/ksmbd/smb2pdu.h
+@@ -24,8 +24,9 @@
+
+ #define SMB21_DEFAULT_IOSIZE (1024 * 1024)
+ #define SMB3_DEFAULT_TRANS_SIZE (1024 * 1024)
+-#define SMB3_MIN_IOSIZE (64 * 1024)
+-#define SMB3_MAX_IOSIZE (8 * 1024 * 1024)
++#define SMB3_MIN_IOSIZE (64 * 1024)
++#define SMB3_MAX_IOSIZE (8 * 1024 * 1024)
++#define SMB3_MAX_MSGSIZE (4 * 4096)
+
+ /*
+ * Definitions for SMB2 Protocol Data Units (network frames)
--- /dev/null
+From ef3691683d7bfd0a2acf48812e4ffe894f10bfa8 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Thu, 19 Jan 2023 11:07:59 +0000
+Subject: KVM: arm64: GICv4.1: Fix race with doorbell on VPE activation/deactivation
+
+From: Marc Zyngier <maz@kernel.org>
+
+commit ef3691683d7bfd0a2acf48812e4ffe894f10bfa8 upstream.
+
+To save the vgic LPI pending state with GICv4.1, the VPEs must all be
+unmapped from the ITSs so that the sGIC caches can be flushed.
+The opposite is done once the state is saved.
+
+This is all done by using the activate/deactivate irqdomain callbacks
+directly from the vgic code. Crucially, this is done without holding
+the irqdesc lock for the interrupts that represent the VPE. And these
+callbacks are changing the state of the irqdesc. What could possibly
+go wrong?
+
+If a doorbell fires while we are messing with the irqdesc state,
+it will acquire the lock and change the interrupt state concurrently.
+Since we don't hold the lock, corruption occurs in the interrupt
+state. Oh well.
+
+While acquiring the lock would fix this (and this was Shanker's
+initial approach), this is still a layering violation we could do
+without. A better approach is actually to free the VPE interrupt,
+do what we have to do, and re-request it.
+
+It is more work, but this usually happens only once in the lifetime
+of the VM and we don't really care about this sort of overhead.
+
+Fixes: f66b7b151e00 ("KVM: arm64: GICv4.1: Try to save VLPI state in save_pending_tables")
+Reported-by: Shanker Donthineni <sdonthineni@nvidia.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20230118022348.4137094-1-sdonthineni@nvidia.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/vgic/vgic-v3.c | 25 +++++++++++--------------
+ arch/arm64/kvm/vgic/vgic-v4.c | 8 ++++++--
+ arch/arm64/kvm/vgic/vgic.h | 1 +
+ 3 files changed, 18 insertions(+), 16 deletions(-)
+
+--- a/arch/arm64/kvm/vgic/vgic-v3.c
++++ b/arch/arm64/kvm/vgic/vgic-v3.c
+@@ -350,26 +350,23 @@ retry:
+ * The deactivation of the doorbell interrupt will trigger the
+ * unmapping of the associated vPE.
+ */
+-static void unmap_all_vpes(struct vgic_dist *dist)
++static void unmap_all_vpes(struct kvm *kvm)
+ {
+- struct irq_desc *desc;
++ struct vgic_dist *dist = &kvm->arch.vgic;
+ int i;
+
+- for (i = 0; i < dist->its_vm.nr_vpes; i++) {
+- desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
+- irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
+- }
++ for (i = 0; i < dist->its_vm.nr_vpes; i++)
++ free_irq(dist->its_vm.vpes[i]->irq, kvm_get_vcpu(kvm, i));
+ }
+
+-static void map_all_vpes(struct vgic_dist *dist)
++static void map_all_vpes(struct kvm *kvm)
+ {
+- struct irq_desc *desc;
++ struct vgic_dist *dist = &kvm->arch.vgic;
+ int i;
+
+- for (i = 0; i < dist->its_vm.nr_vpes; i++) {
+- desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
+- irq_domain_activate_irq(irq_desc_get_irq_data(desc), false);
+- }
++ for (i = 0; i < dist->its_vm.nr_vpes; i++)
++ WARN_ON(vgic_v4_request_vpe_irq(kvm_get_vcpu(kvm, i),
++ dist->its_vm.vpes[i]->irq));
+ }
+
+ /**
+@@ -394,7 +391,7 @@ int vgic_v3_save_pending_tables(struct k
+ * and enabling of the doorbells have already been done.
+ */
+ if (kvm_vgic_global_state.has_gicv4_1) {
+- unmap_all_vpes(dist);
++ unmap_all_vpes(kvm);
+ vlpi_avail = true;
+ }
+
+@@ -444,7 +441,7 @@ int vgic_v3_save_pending_tables(struct k
+
+ out:
+ if (vlpi_avail)
+- map_all_vpes(dist);
++ map_all_vpes(kvm);
+
+ return ret;
+ }
+--- a/arch/arm64/kvm/vgic/vgic-v4.c
++++ b/arch/arm64/kvm/vgic/vgic-v4.c
+@@ -222,6 +222,11 @@ void vgic_v4_get_vlpi_state(struct vgic_
+ *val = !!(*ptr & mask);
+ }
+
++int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq)
++{
++ return request_irq(irq, vgic_v4_doorbell_handler, 0, "vcpu", vcpu);
++}
++
+ /**
+ * vgic_v4_init - Initialize the GICv4 data structures
+ * @kvm: Pointer to the VM being initialized
+@@ -283,8 +288,7 @@ int vgic_v4_init(struct kvm *kvm)
+ irq_flags &= ~IRQ_NOAUTOEN;
+ irq_set_status_flags(irq, irq_flags);
+
+- ret = request_irq(irq, vgic_v4_doorbell_handler,
+- 0, "vcpu", vcpu);
++ ret = vgic_v4_request_vpe_irq(vcpu, irq);
+ if (ret) {
+ kvm_err("failed to allocate vcpu IRQ%d\n", irq);
+ /*
+--- a/arch/arm64/kvm/vgic/vgic.h
++++ b/arch/arm64/kvm/vgic/vgic.h
+@@ -331,5 +331,6 @@ int vgic_v4_init(struct kvm *kvm);
+ void vgic_v4_teardown(struct kvm *kvm);
+ void vgic_v4_configure_vsgis(struct kvm *kvm);
+ void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val);
++int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq);
+
+ #endif
--- /dev/null
+From a44b331614e6f7e63902ed7dff7adc8c85edd8bc Mon Sep 17 00:00:00 2001
+From: Hendrik Borghorst <hborghor@amazon.de>
+Date: Mon, 14 Nov 2022 16:48:23 +0000
+Subject: KVM: x86/vmx: Do not skip segment attributes if unusable bit is set
+
+From: Hendrik Borghorst <hborghor@amazon.de>
+
+commit a44b331614e6f7e63902ed7dff7adc8c85edd8bc upstream.
+
+When serializing and deserializing kvm_sregs, attributes of the segment
+descriptors are stored by user space. For unusable segments,
+vmx_segment_access_rights skips all attributes and sets them to 0.
+
+This means we zero out the DPL (Descriptor Privilege Level) for unusable
+entries.
+
+Unusable segments are - contrary to their name - usable in 64bit mode and
+are used by guests to, for example, create a linear map through the
+NULL selector.
+
+VMENTER checks if SS.DPL is correct depending on the CS segment type.
+For types 9 (Execute Only) and 11 (Execute Read), CS.DPL must be equal to
+SS.DPL [1].
+
+We have seen real world guests setting CS to a usable segment with DPL=3
+and SS to an unusable segment with DPL=3. Once we go through an sregs
+get/set cycle, SS.DPL turns to 0. This causes the virtual machine to crash
+reproducibly.
+
+This commit changes the attribute logic to always preserve attributes for
+unusable segments. According to [2] SS.DPL is always saved on VM exits,
+regardless of the unusable bit so user space applications should have saved
+the information on serialization correctly.
+
+[3] specifies that besides SS.DPL the rest of the attributes of the
+descriptors are undefined after VM entry if unusable bit is set. So, there
+should be no harm in setting them all to the previous state.
+
+[1] Intel SDM Vol 3C 26.3.1.2 Checks on Guest Segment Registers
+[2] Intel SDM Vol 3C 27.3.2 Saving Segment Registers and Descriptor-Table
+Registers
+[3] Intel SDM Vol 3C 26.3.2.2 Loading Guest Segment Registers and
+Descriptor-Table Registers
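+
+For orientation, the access-rights word assembled here follows the
+segment-descriptor attribute layout from the SDM; annotating the shifts
+used in the patch:
+
+	ar  = var->type & 15;		/* bits 0-3:  segment type */
+	ar |= (var->s & 1) << 4;	/* bit  4:    descriptor type (S) */
+	ar |= (var->dpl & 3) << 5;	/* bits 5-6:  DPL */
+	ar |= (var->present & 1) << 7;	/* bit  7:    segment present (P) */
+	ar |= (var->avl & 1) << 12;	/* bit  12:   available for software */
+	ar |= (var->l & 1) << 13;	/* bit  13:   64-bit code segment (L) */
+	ar |= (var->db & 1) << 14;	/* bit  14:   default size (D/B) */
+	ar |= (var->g & 1) << 15;	/* bit  15:   granularity (G) */
+	ar |= (var->unusable || !var->present) << 16;	/* bit 16: unusable */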
+
+Cc: Alexander Graf <graf@amazon.de>
+Cc: stable@vger.kernel.org
+Signed-off-by: Hendrik Borghorst <hborghor@amazon.de>
+Reviewed-by: Jim Mattson <jmattson@google.com>
+Reviewed-by: Alexander Graf <graf@amazon.com>
+Message-Id: <20221114164823.69555-1-hborghor@amazon.de>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx/vmx.c | 21 +++++++++------------
+ 1 file changed, 9 insertions(+), 12 deletions(-)
+
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -3412,18 +3412,15 @@ static u32 vmx_segment_access_rights(str
+ {
+ u32 ar;
+
+- if (var->unusable || !var->present)
+- ar = 1 << 16;
+- else {
+- ar = var->type & 15;
+- ar |= (var->s & 1) << 4;
+- ar |= (var->dpl & 3) << 5;
+- ar |= (var->present & 1) << 7;
+- ar |= (var->avl & 1) << 12;
+- ar |= (var->l & 1) << 13;
+- ar |= (var->db & 1) << 14;
+- ar |= (var->g & 1) << 15;
+- }
++ ar = var->type & 15;
++ ar |= (var->s & 1) << 4;
++ ar |= (var->dpl & 3) << 5;
++ ar |= (var->present & 1) << 7;
++ ar |= (var->avl & 1) << 12;
++ ar |= (var->l & 1) << 13;
++ ar |= (var->db & 1) << 14;
++ ar |= (var->g & 1) << 15;
++ ar |= (var->unusable || !var->present) << 16;
+
+ return ar;
+ }
--- /dev/null
+From 4f11ada10d0ad3fd53e2bd67806351de63a4f9c3 Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <mszeredi@redhat.com>
+Date: Tue, 24 Jan 2023 16:41:18 +0100
+Subject: ovl: fail on invalid uid/gid mapping at copy up
+
+From: Miklos Szeredi <mszeredi@redhat.com>
+
+commit 4f11ada10d0ad3fd53e2bd67806351de63a4f9c3 upstream.
+
+If st_uid/st_gid doesn't have a mapping in the mounter's user_ns, then
+copy-up should fail, just like it would fail if the mounter task was doing
+the copy using "cp -a".
+
+There's a corner case where the "cp -a" would succeed but copy up fail: if
+there's a mapping of the invalid uid/gid (65534 by default) in the user
+namespace. This is because stat(2) will return this value if the mapping
+doesn't exist in the current user_ns and "cp -a" will in turn be able to
+create a file with this uid/gid.
+
+This behavior would be inconsistent with POSIX ACL's, which return -1 for
+invalid uid/gid which result in a failed copy.
+
+For consistency and simplicity, fail the copy up if the st_uid/st_gid
+are invalid.
+
+Fixes: 459c7c565ac3 ("ovl: unprivieged mounts")
+Cc: <stable@vger.kernel.org> # v5.11
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Reviewed-by: Christian Brauner <brauner@kernel.org>
+Reviewed-by: Seth Forshee <sforshee@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/overlayfs/copy_up.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/fs/overlayfs/copy_up.c
++++ b/fs/overlayfs/copy_up.c
+@@ -973,6 +973,10 @@ static int ovl_copy_up_one(struct dentry
+ if (err)
+ return err;
+
++ if (!kuid_has_mapping(current_user_ns(), ctx.stat.uid) ||
++ !kgid_has_mapping(current_user_ns(), ctx.stat.gid))
++ return -EOVERFLOW;
++
+ ctx.metacopy = ovl_need_meta_copy_up(dentry, ctx.stat.mode, flags);
+
+ if (parent) {
--- /dev/null
+From baabaa505563362b71f2637aedd7b807d270656c Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <mszeredi@redhat.com>
+Date: Tue, 24 Jan 2023 16:41:18 +0100
+Subject: ovl: fix tmpfile leak
+
+From: Miklos Szeredi <mszeredi@redhat.com>
+
+commit baabaa505563362b71f2637aedd7b807d270656c upstream.
+
+Missed an error cleanup.
+
+Reported-by: syzbot+fd749a7ea127a84e0ffd@syzkaller.appspotmail.com
+Fixes: 2b1a77461f16 ("ovl: use vfs_tmpfile_open() helper")
+Cc: <stable@vger.kernel.org> # v6.1
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/overlayfs/copy_up.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/overlayfs/copy_up.c
++++ b/fs/overlayfs/copy_up.c
+@@ -754,7 +754,7 @@ static int ovl_copy_up_tmpfile(struct ov
+ if (!c->metacopy && c->stat.size) {
+ err = ovl_copy_up_file(ofs, c->dentry, tmpfile, c->stat.size);
+ if (err)
+- return err;
++ goto out_fput;
+ }
+
+ err = ovl_copy_up_metadata(c, temp);
--- /dev/null
+From 1bc5d819f0b9784043ea08570e1b21107aa35739 Mon Sep 17 00:00:00 2001
+From: Mark Pearson <mpearson-lenovo@squebb.ca>
+Date: Tue, 24 Jan 2023 10:36:23 -0500
+Subject: platform/x86: thinkpad_acpi: Fix profile modes on Intel platforms
+
+From: Mark Pearson <mpearson-lenovo@squebb.ca>
+
+commit 1bc5d819f0b9784043ea08570e1b21107aa35739 upstream.
+
+My last commit to fix profile mode displays on AMD platforms caused
+an issue on Intel platforms - sorry!
+
+In it I was reading the current functional mode (MMC, PSC, AMT) from
+the BIOS but didn't account for the fact that on some of our Intel
+platforms I use a different API which returns just the profile and not
+the functional mode.
+
+This commit fixes it so that on Intel platforms it knows the functional
+mode is always MMC.
+
+I also fixed a potential problem where a platform might try to set the
+mode for both MMC and PSC, which was incorrect.
+
+Tested on X1 Carbon 9 (Intel) and Z13 (AMD).
+
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=216963
+Fixes: fde5f74ccfc7 ("platform/x86: thinkpad_acpi: Fix profile mode display in AMT mode")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Mark Pearson <mpearson-lenovo@squebb.ca>
+Link: https://lore.kernel.org/r/20230124153623.145188-1-mpearson-lenovo@squebb.ca
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/platform/x86/thinkpad_acpi.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -10500,8 +10500,7 @@ static int dytc_profile_set(struct platf
+ if (err)
+ goto unlock;
+ }
+- }
+- if (dytc_capabilities & BIT(DYTC_FC_PSC)) {
++ } else if (dytc_capabilities & BIT(DYTC_FC_PSC)) {
+ err = dytc_command(DYTC_SET_COMMAND(DYTC_FUNCTION_PSC, perfmode, 1), &output);
+ if (err)
+ goto unlock;
+@@ -10529,14 +10528,16 @@ static void dytc_profile_refresh(void)
+ err = dytc_command(DYTC_CMD_MMC_GET, &output);
+ else
+ err = dytc_cql_command(DYTC_CMD_GET, &output);
+- } else if (dytc_capabilities & BIT(DYTC_FC_PSC))
++ funcmode = DYTC_FUNCTION_MMC;
++ } else if (dytc_capabilities & BIT(DYTC_FC_PSC)) {
+ err = dytc_command(DYTC_CMD_GET, &output);
+-
++ /* Check if we are PSC mode, or have AMT enabled */
++ funcmode = (output >> DYTC_GET_FUNCTION_BIT) & 0xF;
++ }
+ mutex_unlock(&dytc_mutex);
+ if (err)
+ return;
+
+- funcmode = (output >> DYTC_GET_FUNCTION_BIT) & 0xF;
+ perfmode = (output >> DYTC_GET_MODE_BIT) & 0xF;
+ convert_dytc_to_profile(funcmode, perfmode, &profile);
+ if (profile != dytc_current_profile) {
--- /dev/null
+From 4bb3d82a1820c1b609ede8eb2332f3cb038c5840 Mon Sep 17 00:00:00 2001
+From: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Date: Fri, 20 Jan 2023 14:14:47 +0100
+Subject: regulator: dt-bindings: samsung,s2mps14: add lost samsung,ext-control-gpios
+
+From: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+
+commit 4bb3d82a1820c1b609ede8eb2332f3cb038c5840 upstream.
+
+The samsung,ext-control-gpios property was lost during conversion to DT
+schema:
+
+ exynos3250-artik5-eval.dtb: pmic@66: regulators:LDO11: Unevaluated properties are not allowed ('samsung,ext-control-gpios' was unexpected)
+
+Fixes: ea98b9eba05c ("regulator: dt-bindings: samsung,s2m: convert to dtschema")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Link: https://lore.kernel.org/r/20230120131447.289702-1-krzysztof.kozlowski@linaro.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/devicetree/bindings/regulator/samsung,s2mps14.yaml | 21 +++++++++-
+ 1 file changed, 19 insertions(+), 2 deletions(-)
+
+--- a/Documentation/devicetree/bindings/regulator/samsung,s2mps14.yaml
++++ b/Documentation/devicetree/bindings/regulator/samsung,s2mps14.yaml
+@@ -19,8 +19,8 @@ description: |
+ additional information and example.
+
+ patternProperties:
+- # 25 LDOs
+- "^LDO([1-9]|[1][0-9]|2[0-5])$":
++ # 25 LDOs, without LDO10-12
++ "^LDO([1-9]|1[3-9]|2[0-5])$":
+ type: object
+ $ref: regulator.yaml#
+ unevaluatedProperties: false
+@@ -29,6 +29,23 @@ patternProperties:
+
+ required:
+ - regulator-name
++
++ "^LDO(1[0-2])$":
++ type: object
++ $ref: regulator.yaml#
++ unevaluatedProperties: false
++ description:
++ Properties for single LDO regulator.
++
++ properties:
++ samsung,ext-control-gpios:
++ maxItems: 1
++ description:
++ LDO10, LDO11 and LDO12 can be configured to external control over
++ GPIO.
++
++ required:
++ - regulator-name
+
+ # 5 bucks
+ "^BUCK[1-5]$":
--- /dev/null
+From 5b89c6f9b2df2b7cf6da8e0b2b87c8995b378cad Mon Sep 17 00:00:00 2001
+From: Masahiro Yamada <masahiroy@kernel.org>
+Date: Sat, 7 Jan 2023 01:12:13 +0900
+Subject: riscv: fix -Wundef warning for CONFIG_RISCV_BOOT_SPINWAIT
+
+From: Masahiro Yamada <masahiroy@kernel.org>
+
+commit 5b89c6f9b2df2b7cf6da8e0b2b87c8995b378cad upstream.
+
+Since commit 80b6093b55e3 ("kbuild: add -Wundef to KBUILD_CPPFLAGS
+for W=1 builds"), building with W=1 detects misuse of #if.
+
+ $ make W=1 ARCH=riscv CROSS_COMPILE=riscv64-linux-gnu- arch/riscv/kernel/
+ [snip]
+ AS arch/riscv/kernel/head.o
+ arch/riscv/kernel/head.S:329:5: warning: "CONFIG_RISCV_BOOT_SPINWAIT" is not defined, evaluates to 0 [-Wundef]
+ 329 | #if CONFIG_RISCV_BOOT_SPINWAIT
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~~
+
+CONFIG_RISCV_BOOT_SPINWAIT is a bool option. #ifdef should be used.
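+
+A bool Kconfig symbol is either defined to 1 or not defined at all, so
+the preprocessor test must check definedness rather than value:
+
+	#if CONFIG_RISCV_BOOT_SPINWAIT		/* -Wundef warns when undefined */
+	#ifdef CONFIG_RISCV_BOOT_SPINWAIT	/* correct for a bool option */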
+
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+Fixes: 2ffc48fc7071 ("RISC-V: Move spinwait booting method to its own config")
+Link: https://lore.kernel.org/r/20230106161213.2374093-1-masahiroy@kernel.org
+Cc: stable@vger.kernel.org
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/riscv/kernel/head.S | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/riscv/kernel/head.S
++++ b/arch/riscv/kernel/head.S
+@@ -326,7 +326,7 @@ clear_bss_done:
+ call soc_early_init
+ tail start_kernel
+
+-#if CONFIG_RISCV_BOOT_SPINWAIT
++#ifdef CONFIG_RISCV_BOOT_SPINWAIT
+ .Lsecondary_start:
+ /* Set trap vector to spin forever to help debug */
+ la a3, .Lsecondary_park
--- /dev/null
+From ba81043753fffbc2ad6e0c5ff2659f12ac2f46b4 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan+linaro@kernel.org>
+Date: Mon, 16 Jan 2023 17:12:01 +0100
+Subject: scsi: ufs: core: Fix devfreq deadlocks
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+commit ba81043753fffbc2ad6e0c5ff2659f12ac2f46b4 upstream.
+
+There is a lock inversion and rwsem read-lock recursion in the devfreq
+target callback which can lead to deadlocks.
+
+Specifically, ufshcd_devfreq_scale() already holds a clk_scaling_lock
+read lock when toggling the write booster, which involves taking the
+dev_cmd mutex before taking another clk_scaling_lock read lock.
+
+This can lead to a deadlock if another thread:
+
+ 1) tries to acquire the dev_cmd and clk_scaling locks in the correct
+ order, or
+
+ 2) takes a clk_scaling write lock before the attempt to take the
+ clk_scaling read lock a second time.
+
+Fix this by dropping the clk_scaling_lock before toggling the write booster
+as was done before commit 0e9d4ca43ba8 ("scsi: ufs: Protect some contexts
+from unexpected clock scaling").
+
+While the devfreq callbacks are already serialised, add a second
+serialising mutex to handle the unlikely case where a callback triggered
+through the devfreq sysfs interface is racing with a request to disable
+clock scaling through the UFS controller 'clkscale_enable' sysfs
+attribute. This could otherwise lead to the write booster being left
+disabled after having disabled clock scaling.
+
+Also take the new mutex in ufshcd_clk_scaling_allow() to make sure that any
+pending write booster update has completed on return.
+
+Note that this currently only affects Qualcomm platforms since commit
+87bd05016a64 ("scsi: ufs: core: Allow host driver to disable wb toggling
+during clock scaling").
+
+The lock inversion (i.e. 1 above) was reported by lockdep as:
+
+ ======================================================
+ WARNING: possible circular locking dependency detected
+ 6.1.0-next-20221216 #211 Not tainted
+ ------------------------------------------------------
+ kworker/u16:2/71 is trying to acquire lock:
+ ffff076280ba98a0 (&hba->dev_cmd.lock){+.+.}-{3:3}, at: ufshcd_query_flag+0x50/0x1c0
+
+ but task is already holding lock:
+ ffff076280ba9cf0 (&hba->clk_scaling_lock){++++}-{3:3}, at: ufshcd_devfreq_scale+0x2b8/0x380
+
+ which lock already depends on the new lock.
+
+ the existing dependency chain (in reverse order) is:
+
+ -> #1 (&hba->clk_scaling_lock){++++}-{3:3}:
+ lock_acquire+0x68/0x90
+ down_read+0x58/0x80
+ ufshcd_exec_dev_cmd+0x70/0x2c0
+ ufshcd_verify_dev_init+0x68/0x170
+ ufshcd_probe_hba+0x398/0x1180
+ ufshcd_async_scan+0x30/0x320
+ async_run_entry_fn+0x34/0x150
+ process_one_work+0x288/0x6c0
+ worker_thread+0x74/0x450
+ kthread+0x118/0x120
+ ret_from_fork+0x10/0x20
+
+ -> #0 (&hba->dev_cmd.lock){+.+.}-{3:3}:
+ __lock_acquire+0x12a0/0x2240
+ lock_acquire.part.0+0xcc/0x220
+ lock_acquire+0x68/0x90
+ __mutex_lock+0x98/0x430
+ mutex_lock_nested+0x2c/0x40
+ ufshcd_query_flag+0x50/0x1c0
+ ufshcd_query_flag_retry+0x64/0x100
+ ufshcd_wb_toggle+0x5c/0x120
+ ufshcd_devfreq_scale+0x2c4/0x380
+ ufshcd_devfreq_target+0xf4/0x230
+ devfreq_set_target+0x84/0x2f0
+ devfreq_update_target+0xc4/0xf0
+ devfreq_monitor+0x38/0x1f0
+ process_one_work+0x288/0x6c0
+ worker_thread+0x74/0x450
+ kthread+0x118/0x120
+ ret_from_fork+0x10/0x20
+
+ other info that might help us debug this:
+ Possible unsafe locking scenario:
+ CPU0 CPU1
+ ---- ----
+ lock(&hba->clk_scaling_lock);
+ lock(&hba->dev_cmd.lock);
+ lock(&hba->clk_scaling_lock);
+ lock(&hba->dev_cmd.lock);
+
+ *** DEADLOCK ***
+
+Fixes: 0e9d4ca43ba8 ("scsi: ufs: Protect some contexts from unexpected clock scaling")
+Cc: stable@vger.kernel.org # 5.12
+Cc: Can Guo <quic_cang@quicinc.com>
+Tested-by: Andrew Halaney <ahalaney@redhat.com>
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://lore.kernel.org/r/20230116161201.16923-1-johan+linaro@kernel.org
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/ufs/core/ufshcd.c | 29 +++++++++++++++--------------
+ include/ufs/ufshcd.h | 2 ++
+ 2 files changed, 17 insertions(+), 14 deletions(-)
+
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -1231,12 +1231,14 @@ static int ufshcd_clock_scaling_prepare(
+ * clock scaling is in progress
+ */
+ ufshcd_scsi_block_requests(hba);
++ mutex_lock(&hba->wb_mutex);
+ down_write(&hba->clk_scaling_lock);
+
+ if (!hba->clk_scaling.is_allowed ||
+ ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
+ ret = -EBUSY;
+ up_write(&hba->clk_scaling_lock);
++ mutex_unlock(&hba->wb_mutex);
+ ufshcd_scsi_unblock_requests(hba);
+ goto out;
+ }
+@@ -1248,12 +1250,16 @@ out:
+ return ret;
+ }
+
+-static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
++static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up)
+ {
+- if (writelock)
+- up_write(&hba->clk_scaling_lock);
+- else
+- up_read(&hba->clk_scaling_lock);
++ up_write(&hba->clk_scaling_lock);
++
++ /* Enable Write Booster if we have scaled up else disable it */
++ if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
++ ufshcd_wb_toggle(hba, scale_up);
++
++ mutex_unlock(&hba->wb_mutex);
++
+ ufshcd_scsi_unblock_requests(hba);
+ ufshcd_release(hba);
+ }
+@@ -1270,7 +1276,6 @@ static void ufshcd_clock_scaling_unprepa
+ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
+ {
+ int ret = 0;
+- bool is_writelock = true;
+
+ ret = ufshcd_clock_scaling_prepare(hba);
+ if (ret)
+@@ -1299,15 +1304,8 @@ static int ufshcd_devfreq_scale(struct u
+ }
+ }
+
+- /* Enable Write Booster if we have scaled up else disable it */
+- if (ufshcd_enable_wb_if_scaling_up(hba)) {
+- downgrade_write(&hba->clk_scaling_lock);
+- is_writelock = false;
+- ufshcd_wb_toggle(hba, scale_up);
+- }
+-
+ out_unprepare:
+- ufshcd_clock_scaling_unprepare(hba, is_writelock);
++ ufshcd_clock_scaling_unprepare(hba, ret, scale_up);
+ return ret;
+ }
+
+@@ -6104,9 +6102,11 @@ static void ufshcd_force_error_recovery(
+
+ static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
+ {
++ mutex_lock(&hba->wb_mutex);
+ down_write(&hba->clk_scaling_lock);
+ hba->clk_scaling.is_allowed = allow;
+ up_write(&hba->clk_scaling_lock);
++ mutex_unlock(&hba->wb_mutex);
+ }
+
+ static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
+@@ -9773,6 +9773,7 @@ int ufshcd_init(struct ufs_hba *hba, voi
+ /* Initialize mutex for exception event control */
+ mutex_init(&hba->ee_ctrl_mutex);
+
++ mutex_init(&hba->wb_mutex);
+ init_rwsem(&hba->clk_scaling_lock);
+
+ ufshcd_init_clk_gating(hba);
+--- a/include/ufs/ufshcd.h
++++ b/include/ufs/ufshcd.h
+@@ -806,6 +806,7 @@ struct ufs_hba_monitor {
+ * @urgent_bkops_lvl: keeps track of urgent bkops level for device
+ * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
+ * device is known or not.
++ * @wb_mutex: used to serialize devfreq and sysfs write booster toggling
+ * @clk_scaling_lock: used to serialize device commands and clock scaling
+ * @desc_size: descriptor sizes reported by device
+ * @scsi_block_reqs_cnt: reference counting for scsi block requests
+@@ -948,6 +949,7 @@ struct ufs_hba {
+ enum bkops_status urgent_bkops_lvl;
+ bool is_urgent_bkops_lvl_checked;
+
++ struct mutex wb_mutex;
+ struct rw_semaphore clk_scaling_lock;
+ unsigned char desc_size[QUERY_DESC_IDN_MAX];
+ atomic_t scsi_block_reqs_cnt;
trace_events_hist-add-check-for-return-value-of-create_hist_field.patch
ftrace-scripts-update-the-instructions-for-ftrace-bisect.sh.patch
cifs-fix-oops-due-to-uncleared-server-smbd_conn-in-reconnect.patch
+ksmbd-add-max-connections-parameter.patch
+ksmbd-do-not-sign-response-to-session-request-for-guest-login.patch
+ksmbd-downgrade-ndr-version-error-message-to-debug.patch
+ksmbd-limit-pdu-length-size-according-to-connection-status.patch
+ovl-fix-tmpfile-leak.patch
+ovl-fail-on-invalid-uid-gid-mapping-at-copy-up.patch
+io_uring-net-cache-provided-buffer-group-value-for-multishot-receives.patch
+kvm-x86-vmx-do-not-skip-segment-attributes-if-unusable-bit-is-set.patch
+kvm-arm64-gicv4.1-fix-race-with-doorbell-on-vpe-activation-deactivation.patch
+scsi-ufs-core-fix-devfreq-deadlocks.patch
+riscv-fix-wundef-warning-for-config_riscv_boot_spinwait.patch
+thermal-intel-int340x-protect-trip-temperature-from-concurrent-updates.patch
+regulator-dt-bindings-samsung-s2mps14-add-lost-samsung-ext-control-gpios.patch
+ipv6-fix-reachability-confirmation-with-proxy_ndp.patch
+arm-9280-1-mm-fix-warning-on-phys_addr_t-to-void-pointer-assignment.patch
+edac-device-respect-any-driver-supplied-workqueue-polling-value.patch
+edac-qcom-do-not-pass-llcc_driv_data-as-edac_device_ctl_info-s-pvt_info.patch
+platform-x86-thinkpad_acpi-fix-profile-modes-on-intel-platforms.patch
+drm-display-dp_mst-correct-the-kref-of-port.patch
+drm-amd-pm-add-missing-allowihinterrupt-message-mapping-for-smu13.0.0.patch
+drm-amdgpu-remove-unconditional-trap-enable-on-add-gfx11-queues.patch
+drm-amdgpu-display-mst-fix-mst_state-pbn_div-and-slot-count-assignments.patch
+drm-amdgpu-display-mst-limit-payload-to-be-updated-one-by-one.patch
+drm-amdgpu-display-mst-update-mst_mgr-relevant-variable-when-long-hpd.patch
--- /dev/null
+From 6757a7abe47bcb12cb2d45661067e182424b0ee3 Mon Sep 17 00:00:00 2001
+From: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Date: Mon, 23 Jan 2023 09:21:10 -0800
+Subject: thermal: intel: int340x: Protect trip temperature from concurrent updates
+
+From: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+
+commit 6757a7abe47bcb12cb2d45661067e182424b0ee3 upstream.
+
+Trip temperatures are read using ACPI methods and stored in memory
+during zone initialization and whenever the firmware sends a change
+notification. The stored trip temperature is returned when the thermal
+core invokes the get_trip_temp() callback.
+
+However, the thermal core may invoke get_trip_temp() while the memory
+copy of the trips is being updated in response to a firmware change
+notification. This may return an invalid trip temperature.
+
+To address this, add a mutex to protect against such invalid
+temperature reads in the get_trip_temp() callback and in
+int340x_thermal_read_trips().
+
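+The locking pattern, reduced to a minimal standalone sketch (MAX_TRIPS,
+read_trip() and update_trips() are hypothetical names, not the
+driver's):
+
+  #include <linux/mutex.h>
+
+  #define MAX_TRIPS 8
+
+  static DEFINE_MUTEX(trip_mutex);
+  static int trip_temp[MAX_TRIPS];
+
+  /* reader: the get_trip_temp() callback path */
+  static int read_trip(int trip)
+  {
+          int t;
+
+          mutex_lock(&trip_mutex);
+          t = trip_temp[trip];
+          mutex_unlock(&trip_mutex);
+          return t;
+  }
+
+  /* writer: the firmware-notification path refreshing the trips */
+  static void update_trips(void)
+  {
+          int i;
+
+          mutex_lock(&trip_mutex);
+          for (i = 0; i < MAX_TRIPS; i++)
+                  trip_temp[i] = 0;    /* placeholder for ACPI re-read */
+          mutex_unlock(&trip_mutex);
+  }
+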
+Fixes: 5fbf7f27fa3d ("Thermal/int340x: Add common thermal zone handler")
+Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Cc: 5.0+ <stable@vger.kernel.org> # 5.0+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c | 18 +++++++++--
+ drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h | 1
+ 2 files changed, 16 insertions(+), 3 deletions(-)
+
+--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
++++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
+@@ -44,11 +44,13 @@ static int int340x_thermal_get_trip_temp
+ int trip, int *temp)
+ {
+ struct int34x_thermal_zone *d = zone->devdata;
+- int i;
++ int i, ret = 0;
+
+ if (d->override_ops && d->override_ops->get_trip_temp)
+ return d->override_ops->get_trip_temp(zone, trip, temp);
+
++ mutex_lock(&d->trip_mutex);
++
+ if (trip < d->aux_trip_nr)
+ *temp = d->aux_trips[trip];
+ else if (trip == d->crt_trip_id)
+@@ -66,10 +68,12 @@ static int int340x_thermal_get_trip_temp
+ }
+ }
+ if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT)
+- return -EINVAL;
++ ret = -EINVAL;
+ }
+
+- return 0;
++ mutex_unlock(&d->trip_mutex);
++
++ return ret;
+ }
+
+ static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
+@@ -180,6 +184,8 @@ int int340x_thermal_read_trips(struct in
+ int trip_cnt = int34x_zone->aux_trip_nr;
+ int i;
+
++ mutex_lock(&int34x_zone->trip_mutex);
++
+ int34x_zone->crt_trip_id = -1;
+ if (!int340x_thermal_get_trip_config(int34x_zone->adev->handle, "_CRT",
+ &int34x_zone->crt_temp))
+@@ -207,6 +213,8 @@ int int340x_thermal_read_trips(struct in
+ int34x_zone->act_trips[i].valid = true;
+ }
+
++ mutex_unlock(&int34x_zone->trip_mutex);
++
+ return trip_cnt;
+ }
+ EXPORT_SYMBOL_GPL(int340x_thermal_read_trips);
+@@ -230,6 +238,8 @@ struct int34x_thermal_zone *int340x_ther
+ if (!int34x_thermal_zone)
+ return ERR_PTR(-ENOMEM);
+
++ mutex_init(&int34x_thermal_zone->trip_mutex);
++
+ int34x_thermal_zone->adev = adev;
+ int34x_thermal_zone->override_ops = override_ops;
+
+@@ -281,6 +291,7 @@ err_thermal_zone:
+ acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
+ kfree(int34x_thermal_zone->aux_trips);
+ err_trip_alloc:
++ mutex_destroy(&int34x_thermal_zone->trip_mutex);
+ kfree(int34x_thermal_zone);
+ return ERR_PTR(ret);
+ }
+@@ -292,6 +303,7 @@ void int340x_thermal_zone_remove(struct
+ thermal_zone_device_unregister(int34x_thermal_zone->zone);
+ acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
+ kfree(int34x_thermal_zone->aux_trips);
++ mutex_destroy(&int34x_thermal_zone->trip_mutex);
+ kfree(int34x_thermal_zone);
+ }
+ EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove);
+--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h
++++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h
+@@ -32,6 +32,7 @@ struct int34x_thermal_zone {
+ struct thermal_zone_device_ops *override_ops;
+ void *priv_data;
+ struct acpi_lpat_conversion_table *lpat_table;
++ struct mutex trip_mutex;
+ };
+
+ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *,