--- /dev/null
+From fc3586ba99500e027c5340c96000bce7db45d1e5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Dec 2023 01:02:28 +0900
+Subject: btrfs: zoned: factor out prepare_allocation_zoned()
+
+From: Naohiro Aota <naohiro.aota@wdc.com>
+
+[ Upstream commit b271fee9a41ca1474d30639fd6cc912c9901d0f8 ]
+
+Factor out prepare_allocation_zoned() for further extension. While at
+it, optimize the if-branch a bit.
+
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Stable-dep-of: 02444f2ac26e ("btrfs: zoned: optimize hint byte for zoned allocator")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/extent-tree.c | 32 +++++++++++++++++++-------------
+ 1 file changed, 19 insertions(+), 13 deletions(-)
+
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index d421e289dc73..69abb6eb81df 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -4139,6 +4139,24 @@ static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info,
+ return 0;
+ }
+
++static int prepare_allocation_zoned(struct btrfs_fs_info *fs_info,
++ struct find_free_extent_ctl *ffe_ctl)
++{
++ if (ffe_ctl->for_treelog) {
++ spin_lock(&fs_info->treelog_bg_lock);
++ if (fs_info->treelog_bg)
++ ffe_ctl->hint_byte = fs_info->treelog_bg;
++ spin_unlock(&fs_info->treelog_bg_lock);
++ } else if (ffe_ctl->for_data_reloc) {
++ spin_lock(&fs_info->relocation_bg_lock);
++ if (fs_info->data_reloc_bg)
++ ffe_ctl->hint_byte = fs_info->data_reloc_bg;
++ spin_unlock(&fs_info->relocation_bg_lock);
++ }
++
++ return 0;
++}
++
+ static int prepare_allocation(struct btrfs_fs_info *fs_info,
+ struct find_free_extent_ctl *ffe_ctl,
+ struct btrfs_space_info *space_info,
+@@ -4149,19 +4167,7 @@ static int prepare_allocation(struct btrfs_fs_info *fs_info,
+ return prepare_allocation_clustered(fs_info, ffe_ctl,
+ space_info, ins);
+ case BTRFS_EXTENT_ALLOC_ZONED:
+- if (ffe_ctl->for_treelog) {
+- spin_lock(&fs_info->treelog_bg_lock);
+- if (fs_info->treelog_bg)
+- ffe_ctl->hint_byte = fs_info->treelog_bg;
+- spin_unlock(&fs_info->treelog_bg_lock);
+- }
+- if (ffe_ctl->for_data_reloc) {
+- spin_lock(&fs_info->relocation_bg_lock);
+- if (fs_info->data_reloc_bg)
+- ffe_ctl->hint_byte = fs_info->data_reloc_bg;
+- spin_unlock(&fs_info->relocation_bg_lock);
+- }
+- return 0;
++ return prepare_allocation_zoned(fs_info, ffe_ctl);
+ default:
+ BUG();
+ }
+--
+2.43.0
+
--- /dev/null
+From a5b54315a09611fe06f64cef6372f5cf69ec1829 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Dec 2023 01:02:29 +0900
+Subject: btrfs: zoned: optimize hint byte for zoned allocator
+
+From: Naohiro Aota <naohiro.aota@wdc.com>
+
+[ Upstream commit 02444f2ac26eae6385a65fcd66915084d15dffba ]
+
+Writing sequentially to a huge file on btrfs on an SMR HDD revealed a
+performance decline from 220 MiB/s to 30 MiB/s after 500 minutes.
+
+The performance drops because of the increased latency of extent
+allocation, which is caused by traversing a lot of full block groups.
+
+So, this patch optimizes ffe_ctl->hint_byte by choosing a block group
+with sufficient free space from the active block group list, which does
+not contain full block groups.
+
+After applying the patch, the performance is maintained well.
+
+Fixes: 2eda57089ea3 ("btrfs: zoned: implement sequential extent allocation")
+CC: stable@vger.kernel.org # 5.15+
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/extent-tree.c | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 69abb6eb81df..b89b558b1592 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -4152,6 +4152,24 @@ static int prepare_allocation_zoned(struct btrfs_fs_info *fs_info,
+ if (fs_info->data_reloc_bg)
+ ffe_ctl->hint_byte = fs_info->data_reloc_bg;
+ spin_unlock(&fs_info->relocation_bg_lock);
++ } else if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) {
++ struct btrfs_block_group *block_group;
++
++ spin_lock(&fs_info->zone_active_bgs_lock);
++ list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
++ /*
++		 * No lock is OK here because avail is monotonically
++ * decreasing, and this is just a hint.
++ */
++ u64 avail = block_group->zone_capacity - block_group->alloc_offset;
++
++ if (block_group_bits(block_group, ffe_ctl->flags) &&
++ avail >= ffe_ctl->num_bytes) {
++ ffe_ctl->hint_byte = block_group->start;
++ break;
++ }
++ }
++ spin_unlock(&fs_info->zone_active_bgs_lock);
+ }
+
+ return 0;
+--
+2.43.0
+
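The patch above only sets ffe_ctl->hint_byte; the sketch below shows how such a hint typically steers the allocator, assuming the usual find_free_extent() behaviour of clamping the search start to the hint. The helper name is made up for illustration and is not part of the patch.

/* Illustrative sketch only, not code from the patch: the hint byte is folded
 * into the search start so the scan begins at the block group that contains
 * it instead of walking every (possibly full) block group from the start.
 */
static u64 pick_search_start(const struct find_free_extent_ctl *ffe_ctl)
{
	u64 search_start = ffe_ctl->search_start;

	if (ffe_ctl->hint_byte > search_start)
		search_start = ffe_ctl->hint_byte;

	return search_start;
}
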
--- /dev/null
+From 326ecab58ed7ae064ae207aa8fc32e6c6c2c961a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 Jan 2024 12:23:55 -0500
+Subject: drm/amdgpu/gfx10: set UNORD_DISPATCH in compute MQDs
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+[ Upstream commit 03ff6d7238b77e5fb2b85dc5fe01d2db9eb893bd ]
+
+This needs to be set to 1 to avoid a potential deadlock on
+GC 10.x and newer. On GC 9.x and older, this needs
+to be set to 0. This can lead to hangs in some mixed
+graphics and compute workloads. Updated firmware is also
+required for AQL.
+
+Reviewed-by: Feifei Xu <Feifei.Xu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 1 +
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+index 34dc3d5bbf35..c2b9dfc6451d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -6572,7 +6572,7 @@ static int gfx_v10_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
+ #ifdef __BIG_ENDIAN
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
+ #endif
+- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
++ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+index 8b7fed913526..22cbfa1bdadd 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+@@ -170,6 +170,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
+ m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control |=
+ ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
++ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
+ pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
+
+ m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
+--
+2.43.0
+
--- /dev/null
+From 8e3476f60015f3d1067c63814bd8c8062ee671f2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 Jan 2024 12:32:59 -0500
+Subject: drm/amdgpu/gfx11: set UNORD_DISPATCH in compute MQDs
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+[ Upstream commit 3380fcad2c906872110d31ddf7aa1fdea57f9df6 ]
+
+This needs to be set to 1 to avoid a potential deadlock on
+GC 10.x and newer. On GC 9.x and older, this needs
+to be set to 0. This can lead to hangs in some mixed
+graphics and compute workloads. Updated firmware is also
+required for AQL.
+
+Reviewed-by: Feifei Xu <Feifei.Xu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 1 +
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+index 59efd4ece92d..d0c3ec9f4fb6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+@@ -3807,7 +3807,7 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
+ (order_base_2(prop->queue_size / 4) - 1));
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
+ (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
+- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
++ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+index 15277f1d5cf0..d722cbd31783 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+@@ -224,6 +224,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
+ m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control |=
+ ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
++ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
+ pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
+
+ m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
+--
+2.43.0
+
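For context, the two hunks in this patch (and in the gfx10 one before it) set the same UNORD_DISPATCH bit of CP_HQD_PQ_CONTROL through different idioms. A rough sketch of the equivalence, assuming the usual _MASK/__SHIFT field definitions; the helper below is hypothetical.

/* Sketch only: both idioms set the same single-bit field. REG_SET_FIELD()
 * clears the field and inserts the new value; since the field is one bit and
 * the new value is 1, OR-ing in the mask (as the KFD hunk does) is equivalent.
 */
static u32 set_unord_dispatch(u32 pq_control)
{
	pq_control &= ~CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
	pq_control |= 1 << CP_HQD_PQ_CONTROL__UNORD_DISPATCH__SHIFT;
	return pq_control;
}
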
--- /dev/null
+From 30fb1d221fab78649c62870404512935f375128d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Oct 2023 12:52:33 +0300
+Subject: drm/i915/lnl: Remove watchdog timers for PSR
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Mika Kahola <mika.kahola@intel.com>
+
+[ Upstream commit a2cd15c2411624a7a97bad60d98d7e0a1e5002a6 ]
+
+Watchdog timers for Lunarlake HW were removed for PSR/PSR2.
+This patch removes the use of these timers from the driver code.
+
+BSpec: 69895
+
+v2: Reword commit message (Ville)
+ Drop HPD mask from LNL (Ville)
+ Revise masking logic (Jouni)
+v3: Revise commit message (Ville)
+ Revert HPD mask removal as irrelevant for this patch (Ville)
+
+Signed-off-by: Mika Kahola <mika.kahola@intel.com>
+Reviewed-by: Jouni Högander <jouni.hogander@intel.com>
+Signed-off-by: Jouni Högander <jouni.hogander@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20231010095233.590613-1-mika.kahola@intel.com
+Stable-dep-of: f9f031dd21a7 ("drm/i915/psr: Only allow PSR in LPSP mode on HSW non-ULT")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/display/intel_psr.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
+index 97d5eef10130..848ac483259b 100644
+--- a/drivers/gpu/drm/i915/display/intel_psr.c
++++ b/drivers/gpu/drm/i915/display/intel_psr.c
+@@ -674,7 +674,9 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
+
+ val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
+
+- val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
++ if (DISPLAY_VER(dev_priv) < 20)
++ val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
++
+ if (IS_HASWELL(dev_priv))
+ val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
+
+@@ -1399,8 +1401,10 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
+ */
+ mask = EDP_PSR_DEBUG_MASK_MEMUP |
+ EDP_PSR_DEBUG_MASK_HPD |
+- EDP_PSR_DEBUG_MASK_LPSP |
+- EDP_PSR_DEBUG_MASK_MAX_SLEEP;
++ EDP_PSR_DEBUG_MASK_LPSP;
++
++ if (DISPLAY_VER(dev_priv) < 20)
++ mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
+
+ /*
+ * No separate pipe reg write mask on hsw/bdw, so have to unmask all
+--
+2.43.0
+
--- /dev/null
+From 8f6915eb3d049a37a32fb411f40955ea6b4fb772 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 Jan 2024 23:21:31 +0200
+Subject: drm/i915/psr: Only allow PSR in LPSP mode on HSW non-ULT
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+[ Upstream commit f9f031dd21a7ce13a13862fa5281d32e1029c70f ]
+
+On HSW non-ULT (or at least on Dell Latitude E6540) external displays
+start to flicker when we enable PSR on the eDP. We observe a much higher
+SR and PC6 residency than should be possible with an external display,
+and indeed much higher than what we observe with eDP disabled and
+only the external display enabled. Looks like the hardware is somehow
+ignoring the fact that the external display is active during PSR.
+
+I wasn't able to reproduce this on my HSW ULT machine, or on BDW.
+So either there's something specific about this particular laptop
+(e.g. some unknown firmware thing) or the issue is limited to just
+non-ULT HSW systems. All known registers that could affect this
+look perfectly reasonable on the affected machine.
+
+As a workaround let's unmask the LPSP event to prevent PSR entry
+except while in LPSP mode (only pipe A + eDP active). This
+will prevent PSR entry entirely when multiple pipes are active.
+The one slight downside is that we now also prevent PSR entry
+when driving eDP with pipe B or C, but I think that's a reasonable
+tradeoff to avoid having to implement a more complex workaround.
+
+Cc: stable@vger.kernel.org
+Fixes: 783d8b80871f ("drm/i915/psr: Re-enable PSR1 on hsw/bdw")
+Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/10092
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240118212131.31868-1-ville.syrjala@linux.intel.com
+Reviewed-by: Jouni Högander <jouni.hogander@intel.com>
+(cherry picked from commit 94501c3ca6400e463ff6cc0c9cf4a2feb6a9205d)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/display/intel_psr.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
+index 848ac483259b..5cf3db7058b9 100644
+--- a/drivers/gpu/drm/i915/display/intel_psr.c
++++ b/drivers/gpu/drm/i915/display/intel_psr.c
+@@ -1400,8 +1400,18 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
+ * can rely on frontbuffer tracking.
+ */
+ mask = EDP_PSR_DEBUG_MASK_MEMUP |
+- EDP_PSR_DEBUG_MASK_HPD |
+- EDP_PSR_DEBUG_MASK_LPSP;
++ EDP_PSR_DEBUG_MASK_HPD;
++
++ /*
++ * For some unknown reason on HSW non-ULT (or at least on
++ * Dell Latitude E6540) external displays start to flicker
++ * when PSR is enabled on the eDP. SR/PC6 residency is much
++ * higher than should be possible with an external display.
++ * As a workaround leave LPSP unmasked to prevent PSR entry
++ * when external displays are active.
++ */
++ if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
++ mask |= EDP_PSR_DEBUG_MASK_LPSP;
+
+ if (DISPLAY_VER(dev_priv) < 20)
+ mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
+--
+2.43.0
+
--- /dev/null
+From 80ddcb002cacaea48cefb6404c21b4d9af8e7d8a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Oct 2023 11:04:56 +0800
+Subject: drm/panel-edp: Add AUO B116XTN02, BOE NT116WHM-N21,836X2,
+ NV116WHM-N49 V8.0
+
+From: Sheng-Liang Pan <sheng-liang.pan@quanta.corp-partner.google.com>
+
+[ Upstream commit 3db2420422a5912d97966e0176050bb0fc9aa63e ]
+
+Add panel identification entry for
+- AUO B116XTN02 family (product ID:0x235c)
+- BOE NT116WHM-N21,836X2 (product ID:0x09c3)
+- BOE NV116WHM-N49 V8.0 (product ID:0x0979)
+
+Signed-off-by: Sheng-Liang Pan <sheng-liang.pan@quanta.corp-partner.google.com>
+Signed-off-by: Douglas Anderson <dianders@chromium.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20231027110435.1.Ia01fe9ec1c0953e0050a232eaa782fef2c037516@changeid
+Stable-dep-of: fc6e76792965 ("drm/panel-edp: drm/panel-edp: Fix AUO B116XAK01 name and timing")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/panel/panel-edp.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
+index 95c8472d878a..5bf28c8443ef 100644
+--- a/drivers/gpu/drm/panel/panel-edp.c
++++ b/drivers/gpu/drm/panel/panel-edp.c
+@@ -1840,6 +1840,7 @@ static const struct edp_panel_entry edp_panels[] = {
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x145c, &delay_200_500_e50, "B116XAB01.4"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x1e9b, &delay_200_500_e50, "B133UAN02.1"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x1ea5, &delay_200_500_e50, "B116XAK01.6"),
++ EDP_PANEL_ENTRY('A', 'U', 'O', 0x235c, &delay_200_500_e50, "B116XTN02"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x582d, &delay_200_500_e50, "B133UAN01.0"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"),
+@@ -1848,8 +1849,10 @@ static const struct edp_panel_entry edp_panels[] = {
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0786, &delay_200_500_p2e80, "NV116WHM-T01"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x07d1, &boe_nv133fhm_n61.delay, "NV133FHM-N61"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x082d, &boe_nv133fhm_n61.delay, "NV133FHM-N62"),
++ EDP_PANEL_ENTRY('B', 'O', 'E', 0x09c3, &delay_200_500_e50, "NT116WHM-N21,836X2"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x094b, &delay_200_500_e50, "NT116WHM-N21"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x095f, &delay_200_500_e50, "NE135FBM-N41 v8.1"),
++ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0979, &delay_200_500_e50, "NV116WHM-N49 V8.0"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x098d, &boe_nv110wtm_n61.delay, "NV110WTM-N61"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x09dd, &delay_200_500_e50, "NT116WHM-N21"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a5d, &delay_200_500_e50, "NV116WHM-N45"),
+--
+2.43.0
+
--- /dev/null
+From 24495cd6003a6c30b3ce9364fe1d7fbeebd8e0a7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Nov 2023 12:41:51 -0800
+Subject: drm/panel-edp: drm/panel-edp: Fix AUO B116XAK01 name and timing
+
+From: Hsin-Yi Wang <hsinyi@chromium.org>
+
+[ Upstream commit fc6e7679296530106ee0954e8ddef1aa58b2e0b5 ]
+
+Rename AUO 0x405c B116XAK01 to B116XAK01.0 and adjust the timing of
+auo_b116xak01: T3=200, T12=500, T7_max = 50, according to the decoded EDID
+and the datasheet.
+
+Fixes: da458286a5e2 ("drm/panel: Add support for AUO B116XAK01 panel")
+Cc: stable@vger.kernel.org
+Signed-off-by: Hsin-Yi Wang <hsinyi@chromium.org>
+Reviewed-by: Douglas Anderson <dianders@chromium.org>
+Acked-by: Maxime Ripard <mripard@kernel.org>
+Signed-off-by: Douglas Anderson <dianders@chromium.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20231107204611.3082200-2-hsinyi@chromium.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/panel/panel-edp.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
+index 5bf28c8443ef..e93e54a98260 100644
+--- a/drivers/gpu/drm/panel/panel-edp.c
++++ b/drivers/gpu/drm/panel/panel-edp.c
+@@ -973,6 +973,8 @@ static const struct panel_desc auo_b116xak01 = {
+ },
+ .delay = {
+ .hpd_absent = 200,
++ .unprepare = 500,
++ .enable = 50,
+ },
+ };
+
+@@ -1841,7 +1843,7 @@ static const struct edp_panel_entry edp_panels[] = {
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x1e9b, &delay_200_500_e50, "B133UAN02.1"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x1ea5, &delay_200_500_e50, "B116XAK01.6"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x235c, &delay_200_500_e50, "B116XTN02"),
+- EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01"),
++ EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x582d, &delay_200_500_e50, "B133UAN01.0"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"),
+--
+2.43.0
+
--- /dev/null
+From f39a78944896457202e81d0036a98f9c0fda6d59 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Nov 2023 12:41:52 -0800
+Subject: drm/panel-edp: drm/panel-edp: Fix AUO B116XTN02 name
+
+From: Hsin-Yi Wang <hsinyi@chromium.org>
+
+[ Upstream commit 962845c090c4f85fa4f6872a5b6c89ee61f53cc0 ]
+
+Rename AUO 0x235c B116XTN02 to B116XTN02.3 according to the decoded EDID.
+
+Fixes: 3db2420422a5 ("drm/panel-edp: Add AUO B116XTN02, BOE NT116WHM-N21,836X2, NV116WHM-N49 V8.0")
+Cc: stable@vger.kernel.org
+Signed-off-by: Hsin-Yi Wang <hsinyi@chromium.org>
+Reviewed-by: Douglas Anderson <dianders@chromium.org>
+Acked-by: Maxime Ripard <mripard@kernel.org>
+Signed-off-by: Douglas Anderson <dianders@chromium.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20231107204611.3082200-3-hsinyi@chromium.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/panel/panel-edp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
+index e93e54a98260..7dc6fb7308ce 100644
+--- a/drivers/gpu/drm/panel/panel-edp.c
++++ b/drivers/gpu/drm/panel/panel-edp.c
+@@ -1842,7 +1842,7 @@ static const struct edp_panel_entry edp_panels[] = {
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x145c, &delay_200_500_e50, "B116XAB01.4"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x1e9b, &delay_200_500_e50, "B133UAN02.1"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x1ea5, &delay_200_500_e50, "B116XAK01.6"),
+- EDP_PANEL_ENTRY('A', 'U', 'O', 0x235c, &delay_200_500_e50, "B116XTN02"),
++ EDP_PANEL_ENTRY('A', 'U', 'O', 0x235c, &delay_200_500_e50, "B116XTN02.3"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x582d, &delay_200_500_e50, "B133UAN01.0"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"),
+--
+2.43.0
+
--- /dev/null
+From cab3c76a57beea9d7892296528efc7d25bfb5ee5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 Sep 2023 09:57:53 +0200
+Subject: fs/pipe: move check to pipe_has_watch_queue()
+
+From: Max Kellermann <max.kellermann@ionos.com>
+
+[ Upstream commit b4bd6b4bac8edd61eb8f7b836969d12c0c6af165 ]
+
+This declutters the code by reducing the number of #ifdefs and makes
+the watch_queue checks simpler. This has no runtime effect; the
+machine code is identical.
+
+Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
+Message-Id: <20230921075755.1378787-2-max.kellermann@ionos.com>
+Reviewed-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Stable-dep-of: e95aada4cb93 ("pipe: wakeup wr_wait after setting max_usage")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/pipe.c | 12 +++---------
+ include/linux/pipe_fs_i.h | 16 ++++++++++++++++
+ 2 files changed, 19 insertions(+), 9 deletions(-)
+
+diff --git a/fs/pipe.c b/fs/pipe.c
+index 139190165a1c..603ab19b0861 100644
+--- a/fs/pipe.c
++++ b/fs/pipe.c
+@@ -437,12 +437,10 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
+ goto out;
+ }
+
+-#ifdef CONFIG_WATCH_QUEUE
+- if (pipe->watch_queue) {
++ if (pipe_has_watch_queue(pipe)) {
+ ret = -EXDEV;
+ goto out;
+ }
+-#endif
+
+ /*
+ * If it wasn't empty we try to merge new data into
+@@ -1324,10 +1322,8 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned int arg)
+ unsigned int nr_slots, size;
+ long ret = 0;
+
+-#ifdef CONFIG_WATCH_QUEUE
+- if (pipe->watch_queue)
++ if (pipe_has_watch_queue(pipe))
+ return -EBUSY;
+-#endif
+
+ size = round_pipe_size(arg);
+ nr_slots = size >> PAGE_SHIFT;
+@@ -1379,10 +1375,8 @@ struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice)
+
+ if (file->f_op != &pipefifo_fops || !pipe)
+ return NULL;
+-#ifdef CONFIG_WATCH_QUEUE
+- if (for_splice && pipe->watch_queue)
++ if (for_splice && pipe_has_watch_queue(pipe))
+ return NULL;
+-#endif
+ return pipe;
+ }
+
+diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
+index 608a9eb86bff..288a8081a9db 100644
+--- a/include/linux/pipe_fs_i.h
++++ b/include/linux/pipe_fs_i.h
+@@ -124,6 +124,22 @@ struct pipe_buf_operations {
+ bool (*get)(struct pipe_inode_info *, struct pipe_buffer *);
+ };
+
++/**
++ * pipe_has_watch_queue - Check whether the pipe is a watch_queue,
++ * i.e. it was created with O_NOTIFICATION_PIPE
++ * @pipe: The pipe to check
++ *
++ * Return: true if pipe is a watch queue, false otherwise.
++ */
++static inline bool pipe_has_watch_queue(const struct pipe_inode_info *pipe)
++{
++#ifdef CONFIG_WATCH_QUEUE
++ return pipe->watch_queue != NULL;
++#else
++ return false;
++#endif
++}
++
+ /**
+ * pipe_empty - Return true if the pipe is empty
+ * @head: The pipe ring head pointer
+--
+2.43.0
+
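A minimal usage sketch of the helper added above, matching the converted call sites; the point is that with CONFIG_WATCH_QUEUE disabled the helper returns a compile-time false, so the branch is dropped entirely, which is why the commit message can claim identical machine code.

/* Guard a path that must not run on a notification pipe; with
 * CONFIG_WATCH_QUEUE=n this whole branch compiles away.
 */
if (pipe_has_watch_queue(pipe))
	return -EXDEV;
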
--- /dev/null
+From a4a58d02965195606ccc0dbb9893176dd19f4604 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 2 Nov 2023 10:50:48 +0100
+Subject: media: i2c: imx290: Properly encode registers as little-endian
+
+From: Alexander Stein <alexander.stein@ew.tq-group.com>
+
+[ Upstream commit 60fc87a69523c294eb23a1316af922f6665a6f8c ]
+
+The conversion to CCI also converted the multi-byte register access to
+big-endian. Correct the register definition by using the correct
+little-endian ones.
+
+Fixes: af73323b9770 ("media: imx290: Convert to new CCI register access helpers")
+Cc: stable@vger.kernel.org
+Signed-off-by: Alexander Stein <alexander.stein@ew.tq-group.com>
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+[Sakari Ailus: Fixed the Fixes: tag.]
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/i2c/imx290.c | 42 +++++++++++++++++++-------------------
+ 1 file changed, 21 insertions(+), 21 deletions(-)
+
+diff --git a/drivers/media/i2c/imx290.c b/drivers/media/i2c/imx290.c
+index 29098612813c..c6fea5837a19 100644
+--- a/drivers/media/i2c/imx290.c
++++ b/drivers/media/i2c/imx290.c
+@@ -41,18 +41,18 @@
+ #define IMX290_WINMODE_720P (1 << 4)
+ #define IMX290_WINMODE_CROP (4 << 4)
+ #define IMX290_FR_FDG_SEL CCI_REG8(0x3009)
+-#define IMX290_BLKLEVEL CCI_REG16(0x300a)
++#define IMX290_BLKLEVEL CCI_REG16_LE(0x300a)
+ #define IMX290_GAIN CCI_REG8(0x3014)
+-#define IMX290_VMAX CCI_REG24(0x3018)
++#define IMX290_VMAX CCI_REG24_LE(0x3018)
+ #define IMX290_VMAX_MAX 0x3ffff
+-#define IMX290_HMAX CCI_REG16(0x301c)
++#define IMX290_HMAX CCI_REG16_LE(0x301c)
+ #define IMX290_HMAX_MAX 0xffff
+-#define IMX290_SHS1 CCI_REG24(0x3020)
++#define IMX290_SHS1 CCI_REG24_LE(0x3020)
+ #define IMX290_WINWV_OB CCI_REG8(0x303a)
+-#define IMX290_WINPV CCI_REG16(0x303c)
+-#define IMX290_WINWV CCI_REG16(0x303e)
+-#define IMX290_WINPH CCI_REG16(0x3040)
+-#define IMX290_WINWH CCI_REG16(0x3042)
++#define IMX290_WINPV CCI_REG16_LE(0x303c)
++#define IMX290_WINWV CCI_REG16_LE(0x303e)
++#define IMX290_WINPH CCI_REG16_LE(0x3040)
++#define IMX290_WINWH CCI_REG16_LE(0x3042)
+ #define IMX290_OUT_CTRL CCI_REG8(0x3046)
+ #define IMX290_ODBIT_10BIT (0 << 0)
+ #define IMX290_ODBIT_12BIT (1 << 0)
+@@ -78,28 +78,28 @@
+ #define IMX290_ADBIT2 CCI_REG8(0x317c)
+ #define IMX290_ADBIT2_10BIT 0x12
+ #define IMX290_ADBIT2_12BIT 0x00
+-#define IMX290_CHIP_ID CCI_REG16(0x319a)
++#define IMX290_CHIP_ID CCI_REG16_LE(0x319a)
+ #define IMX290_ADBIT3 CCI_REG8(0x31ec)
+ #define IMX290_ADBIT3_10BIT 0x37
+ #define IMX290_ADBIT3_12BIT 0x0e
+ #define IMX290_REPETITION CCI_REG8(0x3405)
+ #define IMX290_PHY_LANE_NUM CCI_REG8(0x3407)
+ #define IMX290_OPB_SIZE_V CCI_REG8(0x3414)
+-#define IMX290_Y_OUT_SIZE CCI_REG16(0x3418)
+-#define IMX290_CSI_DT_FMT CCI_REG16(0x3441)
++#define IMX290_Y_OUT_SIZE CCI_REG16_LE(0x3418)
++#define IMX290_CSI_DT_FMT CCI_REG16_LE(0x3441)
+ #define IMX290_CSI_DT_FMT_RAW10 0x0a0a
+ #define IMX290_CSI_DT_FMT_RAW12 0x0c0c
+ #define IMX290_CSI_LANE_MODE CCI_REG8(0x3443)
+-#define IMX290_EXTCK_FREQ CCI_REG16(0x3444)
+-#define IMX290_TCLKPOST CCI_REG16(0x3446)
+-#define IMX290_THSZERO CCI_REG16(0x3448)
+-#define IMX290_THSPREPARE CCI_REG16(0x344a)
+-#define IMX290_TCLKTRAIL CCI_REG16(0x344c)
+-#define IMX290_THSTRAIL CCI_REG16(0x344e)
+-#define IMX290_TCLKZERO CCI_REG16(0x3450)
+-#define IMX290_TCLKPREPARE CCI_REG16(0x3452)
+-#define IMX290_TLPX CCI_REG16(0x3454)
+-#define IMX290_X_OUT_SIZE CCI_REG16(0x3472)
++#define IMX290_EXTCK_FREQ CCI_REG16_LE(0x3444)
++#define IMX290_TCLKPOST CCI_REG16_LE(0x3446)
++#define IMX290_THSZERO CCI_REG16_LE(0x3448)
++#define IMX290_THSPREPARE CCI_REG16_LE(0x344a)
++#define IMX290_TCLKTRAIL CCI_REG16_LE(0x344c)
++#define IMX290_THSTRAIL CCI_REG16_LE(0x344e)
++#define IMX290_TCLKZERO CCI_REG16_LE(0x3450)
++#define IMX290_TCLKPREPARE CCI_REG16_LE(0x3452)
++#define IMX290_TLPX CCI_REG16_LE(0x3454)
++#define IMX290_X_OUT_SIZE CCI_REG16_LE(0x3472)
+ #define IMX290_INCKSEL7 CCI_REG8(0x3480)
+
+ #define IMX290_PGCTRL_REGEN BIT(0)
+--
+2.43.0
+
--- /dev/null
+From 432f3471b3b066f02deb8e35b1a1684355699361 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Nov 2023 17:42:40 +0200
+Subject: media: v4l: cci: Add macros to obtain register width and address
+
+From: Sakari Ailus <sakari.ailus@linux.intel.com>
+
+[ Upstream commit cd93cc245dfe334c38da98c14b34f9597e1b4ea6 ]
+
+Add CCI_REG_WIDTH() macro to obtain register width in bits and similarly,
+CCI_REG_WIDTH_BYTES() to obtain it in bytes.
+
+Also add CCI_REG_ADDR() macro to obtain the address of a register.
+
+Use both macros in v4l2-cci.c, too.
+
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Stable-dep-of: d92e7a013ff3 ("media: v4l2-cci: Add support for little-endian encoded registers")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/v4l2-core/v4l2-cci.c | 8 ++++----
+ include/media/v4l2-cci.h | 5 +++++
+ 2 files changed, 9 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/media/v4l2-core/v4l2-cci.c b/drivers/media/v4l2-core/v4l2-cci.c
+index bc2dbec019b0..3179160abde3 100644
+--- a/drivers/media/v4l2-core/v4l2-cci.c
++++ b/drivers/media/v4l2-core/v4l2-cci.c
+@@ -25,8 +25,8 @@ int cci_read(struct regmap *map, u32 reg, u64 *val, int *err)
+ if (err && *err)
+ return *err;
+
+- len = FIELD_GET(CCI_REG_WIDTH_MASK, reg);
+- reg = FIELD_GET(CCI_REG_ADDR_MASK, reg);
++ len = CCI_REG_WIDTH_BYTES(reg);
++ reg = CCI_REG_ADDR(reg);
+
+ ret = regmap_bulk_read(map, reg, buf, len);
+ if (ret) {
+@@ -75,8 +75,8 @@ int cci_write(struct regmap *map, u32 reg, u64 val, int *err)
+ if (err && *err)
+ return *err;
+
+- len = FIELD_GET(CCI_REG_WIDTH_MASK, reg);
+- reg = FIELD_GET(CCI_REG_ADDR_MASK, reg);
++ len = CCI_REG_WIDTH_BYTES(reg);
++ reg = CCI_REG_ADDR(reg);
+
+ switch (len) {
+ case 1:
+diff --git a/include/media/v4l2-cci.h b/include/media/v4l2-cci.h
+index f2c2962e936b..a2835a663df5 100644
+--- a/include/media/v4l2-cci.h
++++ b/include/media/v4l2-cci.h
+@@ -7,6 +7,7 @@
+ #ifndef _V4L2_CCI_H
+ #define _V4L2_CCI_H
+
++#include <linux/bitfield.h>
+ #include <linux/bits.h>
+ #include <linux/types.h>
+
+@@ -34,6 +35,10 @@ struct cci_reg_sequence {
+ #define CCI_REG_WIDTH_SHIFT 16
+ #define CCI_REG_WIDTH_MASK GENMASK(19, 16)
+
++#define CCI_REG_WIDTH_BYTES(x) FIELD_GET(CCI_REG_WIDTH_MASK, x)
++#define CCI_REG_WIDTH(x) (CCI_REG_WIDTH_BYTES(x) << 3)
++#define CCI_REG_ADDR(x) FIELD_GET(CCI_REG_ADDR_MASK, x)
++
+ #define CCI_REG8(x) ((1 << CCI_REG_WIDTH_SHIFT) | (x))
+ #define CCI_REG16(x) ((2 << CCI_REG_WIDTH_SHIFT) | (x))
+ #define CCI_REG24(x) ((3 << CCI_REG_WIDTH_SHIFT) | (x))
+--
+2.43.0
+
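A short worked example of what the new macros yield, using the encoding shown above (the register address 0x301c is taken from the IMX290 patch earlier in this series).

/* CCI register "addresses" pack the byte width into bits 19:16 and the
 * register address into the low bits.  For a 16-bit register at 0x301c:
 */
u32 reg = CCI_REG16(0x301c);	/* == (2 << 16) | 0x301c */

/* CCI_REG_WIDTH_BYTES(reg) == 2        (width in bytes)              */
/* CCI_REG_WIDTH(reg)       == 16       (width in bits, bytes << 3)   */
/* CCI_REG_ADDR(reg)        == 0x301c   (plain register address)      */
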
--- /dev/null
+From 923eb231697893efb95c39ba32ba78260ceffff3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Nov 2023 10:45:30 +0200
+Subject: media: v4l: cci: Include linux/bits.h
+
+From: Sakari Ailus <sakari.ailus@linux.intel.com>
+
+[ Upstream commit eba5058633b4d11e2a4d65eae9f1fce0b96365d9 ]
+
+linux/bits.h is needed for GENMASK(). Include it.
+
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Stable-dep-of: d92e7a013ff3 ("media: v4l2-cci: Add support for little-endian encoded registers")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/media/v4l2-cci.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/include/media/v4l2-cci.h b/include/media/v4l2-cci.h
+index 0f6803e4b17e..f2c2962e936b 100644
+--- a/include/media/v4l2-cci.h
++++ b/include/media/v4l2-cci.h
+@@ -7,6 +7,7 @@
+ #ifndef _V4L2_CCI_H
+ #define _V4L2_CCI_H
+
++#include <linux/bits.h>
+ #include <linux/types.h>
+
+ struct i2c_client;
+--
+2.43.0
+
--- /dev/null
+From d4b0040e47cf6182193fc884f2425ef2815a8ad1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 2 Nov 2023 10:50:47 +0100
+Subject: media: v4l2-cci: Add support for little-endian encoded registers
+
+From: Alexander Stein <alexander.stein@ew.tq-group.com>
+
+[ Upstream commit d92e7a013ff33f4e0b31bbf768d0c85a8acefebf ]
+
+Some sensors, e.g. the Sony IMX290, use little-endian registers. Add
+support for those by encoding the endianness into bit 20 of the register
+address.
+
+Fixes: af73323b9770 ("media: imx290: Convert to new CCI register access helpers")
+Cc: stable@vger.kernel.org
+Signed-off-by: Alexander Stein <alexander.stein@ew.tq-group.com>
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+[Sakari Ailus: Fixed commit message.]
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/v4l2-core/v4l2-cci.c | 44 ++++++++++++++++++++++++------
+ include/media/v4l2-cci.h | 5 ++++
+ 2 files changed, 41 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/media/v4l2-core/v4l2-cci.c b/drivers/media/v4l2-core/v4l2-cci.c
+index 3179160abde3..10005c80f43b 100644
+--- a/drivers/media/v4l2-core/v4l2-cci.c
++++ b/drivers/media/v4l2-core/v4l2-cci.c
+@@ -18,6 +18,7 @@
+
+ int cci_read(struct regmap *map, u32 reg, u64 *val, int *err)
+ {
++ bool little_endian;
+ unsigned int len;
+ u8 buf[8];
+ int ret;
+@@ -25,6 +26,7 @@ int cci_read(struct regmap *map, u32 reg, u64 *val, int *err)
+ if (err && *err)
+ return *err;
+
++ little_endian = reg & CCI_REG_LE;
+ len = CCI_REG_WIDTH_BYTES(reg);
+ reg = CCI_REG_ADDR(reg);
+
+@@ -40,16 +42,28 @@ int cci_read(struct regmap *map, u32 reg, u64 *val, int *err)
+ *val = buf[0];
+ break;
+ case 2:
+- *val = get_unaligned_be16(buf);
++ if (little_endian)
++ *val = get_unaligned_le16(buf);
++ else
++ *val = get_unaligned_be16(buf);
+ break;
+ case 3:
+- *val = get_unaligned_be24(buf);
++ if (little_endian)
++ *val = get_unaligned_le24(buf);
++ else
++ *val = get_unaligned_be24(buf);
+ break;
+ case 4:
+- *val = get_unaligned_be32(buf);
++ if (little_endian)
++ *val = get_unaligned_le32(buf);
++ else
++ *val = get_unaligned_be32(buf);
+ break;
+ case 8:
+- *val = get_unaligned_be64(buf);
++ if (little_endian)
++ *val = get_unaligned_le64(buf);
++ else
++ *val = get_unaligned_be64(buf);
+ break;
+ default:
+ dev_err(regmap_get_device(map), "Error invalid reg-width %u for reg 0x%04x\n",
+@@ -68,6 +82,7 @@ EXPORT_SYMBOL_GPL(cci_read);
+
+ int cci_write(struct regmap *map, u32 reg, u64 val, int *err)
+ {
++ bool little_endian;
+ unsigned int len;
+ u8 buf[8];
+ int ret;
+@@ -75,6 +90,7 @@ int cci_write(struct regmap *map, u32 reg, u64 val, int *err)
+ if (err && *err)
+ return *err;
+
++ little_endian = reg & CCI_REG_LE;
+ len = CCI_REG_WIDTH_BYTES(reg);
+ reg = CCI_REG_ADDR(reg);
+
+@@ -83,16 +99,28 @@ int cci_write(struct regmap *map, u32 reg, u64 val, int *err)
+ buf[0] = val;
+ break;
+ case 2:
+- put_unaligned_be16(val, buf);
++ if (little_endian)
++ put_unaligned_le16(val, buf);
++ else
++ put_unaligned_be16(val, buf);
+ break;
+ case 3:
+- put_unaligned_be24(val, buf);
++ if (little_endian)
++ put_unaligned_le24(val, buf);
++ else
++ put_unaligned_be24(val, buf);
+ break;
+ case 4:
+- put_unaligned_be32(val, buf);
++ if (little_endian)
++ put_unaligned_le32(val, buf);
++ else
++ put_unaligned_be32(val, buf);
+ break;
+ case 8:
+- put_unaligned_be64(val, buf);
++ if (little_endian)
++ put_unaligned_le64(val, buf);
++ else
++ put_unaligned_be64(val, buf);
+ break;
+ default:
+ dev_err(regmap_get_device(map), "Error invalid reg-width %u for reg 0x%04x\n",
+diff --git a/include/media/v4l2-cci.h b/include/media/v4l2-cci.h
+index a2835a663df5..8b0b361b464c 100644
+--- a/include/media/v4l2-cci.h
++++ b/include/media/v4l2-cci.h
+@@ -38,12 +38,17 @@ struct cci_reg_sequence {
+ #define CCI_REG_WIDTH_BYTES(x) FIELD_GET(CCI_REG_WIDTH_MASK, x)
+ #define CCI_REG_WIDTH(x) (CCI_REG_WIDTH_BYTES(x) << 3)
+ #define CCI_REG_ADDR(x) FIELD_GET(CCI_REG_ADDR_MASK, x)
++#define CCI_REG_LE BIT(20)
+
+ #define CCI_REG8(x) ((1 << CCI_REG_WIDTH_SHIFT) | (x))
+ #define CCI_REG16(x) ((2 << CCI_REG_WIDTH_SHIFT) | (x))
+ #define CCI_REG24(x) ((3 << CCI_REG_WIDTH_SHIFT) | (x))
+ #define CCI_REG32(x) ((4 << CCI_REG_WIDTH_SHIFT) | (x))
+ #define CCI_REG64(x) ((8 << CCI_REG_WIDTH_SHIFT) | (x))
++#define CCI_REG16_LE(x) (CCI_REG_LE | (2U << CCI_REG_WIDTH_SHIFT) | (x))
++#define CCI_REG24_LE(x) (CCI_REG_LE | (3U << CCI_REG_WIDTH_SHIFT) | (x))
++#define CCI_REG32_LE(x) (CCI_REG_LE | (4U << CCI_REG_WIDTH_SHIFT) | (x))
++#define CCI_REG64_LE(x) (CCI_REG_LE | (8U << CCI_REG_WIDTH_SHIFT) | (x))
+
+ /**
+ * cci_read() - Read a value from a single CCI register
+--
+2.43.0
+
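A hedged usage sketch from a hypothetical sensor driver: the register name and expected ID value below are invented for illustration, while the CCI_REG16_LE() and cci_read() signatures are exactly the ones added above.

#include <media/v4l2-cci.h>

/* Hypothetical little-endian 16-bit ID register, for illustration only. */
#define SENSOR_REG_CHIP_ID	CCI_REG16_LE(0x0016)
#define SENSOR_CHIP_ID		0x1234

static int sensor_check_id(struct regmap *map)
{
	u64 id;
	int ret = 0;

	/* cci_read() now byte-swaps according to the CCI_REG_LE flag in bit 20. */
	cci_read(map, SENSOR_REG_CHIP_ID, &id, &ret);
	if (ret)
		return ret;

	return id == SENSOR_CHIP_ID ? 0 : -ENODEV;
}
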
--- /dev/null
+From 85ad3576eca838e9ed4e3ea9494c6283b0c26a5f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Dec 2023 20:07:52 +0800
+Subject: mm: migrate: fix getting incorrect page mapping during page migration
+
+From: Baolin Wang <baolin.wang@linux.alibaba.com>
+
+[ Upstream commit d1adb25df7111de83b64655a80b5a135adbded61 ]
+
+When running stress-ng testing, we found the kernel crash below after a few hours:
+
+Unable to handle kernel NULL pointer dereference at virtual address 0000000000000000
+pc : dentry_name+0xd8/0x224
+lr : pointer+0x22c/0x370
+sp : ffff800025f134c0
+......
+Call trace:
+ dentry_name+0xd8/0x224
+ pointer+0x22c/0x370
+ vsnprintf+0x1ec/0x730
+ vscnprintf+0x2c/0x60
+ vprintk_store+0x70/0x234
+ vprintk_emit+0xe0/0x24c
+ vprintk_default+0x3c/0x44
+ vprintk_func+0x84/0x2d0
+ printk+0x64/0x88
+ __dump_page+0x52c/0x530
+ dump_page+0x14/0x20
+ set_migratetype_isolate+0x110/0x224
+ start_isolate_page_range+0xc4/0x20c
+ offline_pages+0x124/0x474
+ memory_block_offline+0x44/0xf4
+ memory_subsys_offline+0x3c/0x70
+ device_offline+0xf0/0x120
+ ......
+
+After analyzing the vmcore, I found that this issue is caused by page migration.
+The scenario is that one thread is doing page migration: the target page's
+->mapping field is used to save the 'anon_vma' pointer between page unmap and
+page move, and at this point the target page is locked and its refcount is 1.
+
+Meanwhile, another stress-ng thread is performing memory hotplug and
+attempting to offline the target page that is being migrated. It discovers that
+the refcount of this target page is 1, which prevents the offline operation, and
+proceeds to dump the page. However, page_mapping() of the target page may
+return an incorrect file mapping and crash the system in dump_mapping(), since
+the target page->mapping only saves the 'anon_vma' pointer without setting
+the PAGE_MAPPING_ANON flag.
+
+There are several ways to fix this issue:
+(1) Setting the PAGE_MAPPING_ANON flag for the target page's ->mapping when saving
+'anon_vma', but this can confuse PageAnon() for PFN walkers, since the target
+page has not built its mappings yet.
+(2) Taking the page lock before calling page_mapping() in __dump_page() to avoid
+crashing the system; however, there are still some PFN walkers that call
+page_mapping() without holding the page lock, such as compaction.
+(3) Using the target page->private field to save the 'anon_vma' pointer and 2 bits
+of page state, just as page->mapping records an anonymous page, which removes
+the page_mapping() impact for PFN walkers and is also a simple approach.
+
+So I choose option 3 to fix this issue, and this can also fix other potential
+issues for PFN walkers, such as compaction.
+
+Link: https://lkml.kernel.org/r/e60b17a88afc38cb32f84c3e30837ec70b343d2b.1702641709.git.baolin.wang@linux.alibaba.com
+Fixes: 64c8902ed441 ("migrate_pages: split unmap_and_move() to _unmap() and _move()")
+Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
+Reviewed-by: "Huang, Ying" <ying.huang@intel.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Xu Yu <xuyu@linux.alibaba.com>
+Cc: Zi Yan <ziy@nvidia.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/migrate.c | 27 ++++++++++-----------------
+ 1 file changed, 10 insertions(+), 17 deletions(-)
+
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 3373fc1c2d0f..b4d972d80b10 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -1026,38 +1026,31 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
+ }
+
+ /*
+- * To record some information during migration, we use some unused
+- * fields (mapping and private) of struct folio of the newly allocated
+- * destination folio. This is safe because nobody is using them
+- * except us.
++ * To record some information during migration, we use unused private
++ * field of struct folio of the newly allocated destination folio.
++ * This is safe because nobody is using it except us.
+ */
+-union migration_ptr {
+- struct anon_vma *anon_vma;
+- struct address_space *mapping;
+-};
+-
+ enum {
+ PAGE_WAS_MAPPED = BIT(0),
+ PAGE_WAS_MLOCKED = BIT(1),
++ PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
+ };
+
+ static void __migrate_folio_record(struct folio *dst,
+- unsigned long old_page_state,
++ int old_page_state,
+ struct anon_vma *anon_vma)
+ {
+- union migration_ptr ptr = { .anon_vma = anon_vma };
+- dst->mapping = ptr.mapping;
+- dst->private = (void *)old_page_state;
++ dst->private = (void *)anon_vma + old_page_state;
+ }
+
+ static void __migrate_folio_extract(struct folio *dst,
+ int *old_page_state,
+ struct anon_vma **anon_vmap)
+ {
+- union migration_ptr ptr = { .mapping = dst->mapping };
+- *anon_vmap = ptr.anon_vma;
+- *old_page_state = (unsigned long)dst->private;
+- dst->mapping = NULL;
++ unsigned long private = (unsigned long)dst->private;
++
++ *anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
++ *old_page_state = private & PAGE_OLD_STATES;
+ dst->private = NULL;
+ }
+
+--
+2.43.0
+
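The pointer packing used by __migrate_folio_record()/__migrate_folio_extract() above works because struct anon_vma objects are at least word-aligned, leaving the two low pointer bits free for the state flags. A small illustration of my reading of the scheme follows; it is not additional kernel code.

/* Record: the low two bits of the (aligned) anon_vma pointer carry the state. */
void *packed = (void *)anon_vma + old_page_state;	/* stored in dst->private */

/* Extract: mask the state bits off again to recover both pieces. */
struct anon_vma *vma = (struct anon_vma *)((unsigned long)packed & ~PAGE_OLD_STATES);
int old_state = (unsigned long)packed & PAGE_OLD_STATES;
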
--- /dev/null
+From dde8f0daf174b28516ba76ada18614d6f9c62f31 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 21 Oct 2023 12:33:22 +0800
+Subject: mm: migrate: record the mlocked page status to remove unnecessary lru
+ drain
+
+From: Baolin Wang <baolin.wang@linux.alibaba.com>
+
+[ Upstream commit eebb3dabbb5cc590afe32880b5d3726d0fbf88db ]
+
+When doing compaction, I found that lru_add_drain() is an obvious hotspot
+when migrating pages. The distribution of this hotspot is as follows:
+ - 18.75% compact_zone
+ - 17.39% migrate_pages
+ - 13.79% migrate_pages_batch
+ - 11.66% migrate_folio_move
+ - 7.02% lru_add_drain
+ + 7.02% lru_add_drain_cpu
+ + 3.00% move_to_new_folio
+ 1.23% rmap_walk
+ + 1.92% migrate_folio_unmap
+ + 3.20% migrate_pages_sync
+ + 0.90% isolate_migratepages
+
+The lru_add_drain() was added by commit c3096e6782b7 ("mm/migrate:
+__unmap_and_move() push good newpage to LRU") to drain the newpage to LRU
+immediately, to help to build up the correct newpage->mlock_count in
+remove_migration_ptes() for mlocked pages. However, if no mlocked pages
+are migrating, then we can avoid this lru drain operation, especially in
+heavily concurrent scenarios.
+
+So we can record the source pages' mlocked status in
+migrate_folio_unmap(), and only drain the lru list when the mlocked status
+is set in migrate_folio_move().
+
+In addition, the page was already isolated from the lru when migrating, so
+the mlocked status checked by folio_test_mlocked() in migrate_folio_unmap()
+is stable.
+
+After this patch, I can see the hotspot of lru_add_drain() is gone:
+ - 9.41% migrate_pages_batch
+ - 6.15% migrate_folio_move
+ - 3.64% move_to_new_folio
+ + 1.80% migrate_folio_extra
+ + 1.70% buffer_migrate_folio
+ + 1.41% rmap_walk
+ + 0.62% folio_add_lru
+ + 3.07% migrate_folio_unmap
+
+Meanwhile, the compaction latency shows some improvements when running
+thpscale:
+ base patched
+Amean fault-both-1 1131.22 ( 0.00%) 1112.55 * 1.65%*
+Amean fault-both-3 2489.75 ( 0.00%) 2324.15 * 6.65%*
+Amean fault-both-5 3257.37 ( 0.00%) 3183.18 * 2.28%*
+Amean fault-both-7 4257.99 ( 0.00%) 4079.04 * 4.20%*
+Amean fault-both-12 6614.02 ( 0.00%) 6075.60 * 8.14%*
+Amean fault-both-18 10607.78 ( 0.00%) 8978.86 * 15.36%*
+Amean fault-both-24 14911.65 ( 0.00%) 11619.55 * 22.08%*
+Amean fault-both-30 14954.67 ( 0.00%) 14925.66 * 0.19%*
+Amean fault-both-32 16654.87 ( 0.00%) 15580.31 * 6.45%*
+
+Link: https://lkml.kernel.org/r/06e9153a7a4850352ec36602df3a3a844de45698.1697859741.git.baolin.wang@linux.alibaba.com
+Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
+Reviewed-by: "Huang, Ying" <ying.huang@intel.com>
+Reviewed-by: Zi Yan <ziy@nvidia.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Yin Fengwei <fengwei.yin@intel.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: d1adb25df711 ("mm: migrate: fix getting incorrect page mapping during page migration")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/migrate.c | 48 +++++++++++++++++++++++++++++-------------------
+ 1 file changed, 29 insertions(+), 19 deletions(-)
+
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 03bc2063ac87..3373fc1c2d0f 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -1035,22 +1035,28 @@ union migration_ptr {
+ struct anon_vma *anon_vma;
+ struct address_space *mapping;
+ };
++
++enum {
++ PAGE_WAS_MAPPED = BIT(0),
++ PAGE_WAS_MLOCKED = BIT(1),
++};
++
+ static void __migrate_folio_record(struct folio *dst,
+- unsigned long page_was_mapped,
++ unsigned long old_page_state,
+ struct anon_vma *anon_vma)
+ {
+ union migration_ptr ptr = { .anon_vma = anon_vma };
+ dst->mapping = ptr.mapping;
+- dst->private = (void *)page_was_mapped;
++ dst->private = (void *)old_page_state;
+ }
+
+ static void __migrate_folio_extract(struct folio *dst,
+- int *page_was_mappedp,
++ int *old_page_state,
+ struct anon_vma **anon_vmap)
+ {
+ union migration_ptr ptr = { .mapping = dst->mapping };
+ *anon_vmap = ptr.anon_vma;
+- *page_was_mappedp = (unsigned long)dst->private;
++ *old_page_state = (unsigned long)dst->private;
+ dst->mapping = NULL;
+ dst->private = NULL;
+ }
+@@ -1111,7 +1117,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
+ {
+ struct folio *dst;
+ int rc = -EAGAIN;
+- int page_was_mapped = 0;
++ int old_page_state = 0;
+ struct anon_vma *anon_vma = NULL;
+ bool is_lru = !__PageMovable(&src->page);
+ bool locked = false;
+@@ -1165,6 +1171,8 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
+ folio_lock(src);
+ }
+ locked = true;
++ if (folio_test_mlocked(src))
++ old_page_state |= PAGE_WAS_MLOCKED;
+
+ if (folio_test_writeback(src)) {
+ /*
+@@ -1214,7 +1222,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
+ dst_locked = true;
+
+ if (unlikely(!is_lru)) {
+- __migrate_folio_record(dst, page_was_mapped, anon_vma);
++ __migrate_folio_record(dst, old_page_state, anon_vma);
+ return MIGRATEPAGE_UNMAP;
+ }
+
+@@ -1240,11 +1248,11 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
+ VM_BUG_ON_FOLIO(folio_test_anon(src) &&
+ !folio_test_ksm(src) && !anon_vma, src);
+ try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
+- page_was_mapped = 1;
++ old_page_state |= PAGE_WAS_MAPPED;
+ }
+
+ if (!folio_mapped(src)) {
+- __migrate_folio_record(dst, page_was_mapped, anon_vma);
++ __migrate_folio_record(dst, old_page_state, anon_vma);
+ return MIGRATEPAGE_UNMAP;
+ }
+
+@@ -1256,7 +1264,8 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
+ if (rc == -EAGAIN)
+ ret = NULL;
+
+- migrate_folio_undo_src(src, page_was_mapped, anon_vma, locked, ret);
++ migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
++ anon_vma, locked, ret);
+ migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);
+
+ return rc;
+@@ -1269,12 +1278,12 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
+ struct list_head *ret)
+ {
+ int rc;
+- int page_was_mapped = 0;
++ int old_page_state = 0;
+ struct anon_vma *anon_vma = NULL;
+ bool is_lru = !__PageMovable(&src->page);
+ struct list_head *prev;
+
+- __migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
++ __migrate_folio_extract(dst, &old_page_state, &anon_vma);
+ prev = dst->lru.prev;
+ list_del(&dst->lru);
+
+@@ -1295,10 +1304,10 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
+ * isolated from the unevictable LRU: but this case is the easiest.
+ */
+ folio_add_lru(dst);
+- if (page_was_mapped)
++ if (old_page_state & PAGE_WAS_MLOCKED)
+ lru_add_drain();
+
+- if (page_was_mapped)
++ if (old_page_state & PAGE_WAS_MAPPED)
+ remove_migration_ptes(src, dst, false);
+
+ out_unlock_both:
+@@ -1330,11 +1339,12 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
+ */
+ if (rc == -EAGAIN) {
+ list_add(&dst->lru, prev);
+- __migrate_folio_record(dst, page_was_mapped, anon_vma);
++ __migrate_folio_record(dst, old_page_state, anon_vma);
+ return rc;
+ }
+
+- migrate_folio_undo_src(src, page_was_mapped, anon_vma, true, ret);
++ migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
++ anon_vma, true, ret);
+ migrate_folio_undo_dst(dst, true, put_new_folio, private);
+
+ return rc;
+@@ -1802,12 +1812,12 @@ static int migrate_pages_batch(struct list_head *from,
+ dst = list_first_entry(&dst_folios, struct folio, lru);
+ dst2 = list_next_entry(dst, lru);
+ list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
+- int page_was_mapped = 0;
++ int old_page_state = 0;
+ struct anon_vma *anon_vma = NULL;
+
+- __migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
+- migrate_folio_undo_src(folio, page_was_mapped, anon_vma,
+- true, ret_folios);
++ __migrate_folio_extract(dst, &old_page_state, &anon_vma);
++ migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
++ anon_vma, true, ret_folios);
+ list_del(&dst->lru);
+ migrate_folio_undo_dst(dst, true, put_new_folio, private);
+ dst = dst2;
+--
+2.43.0
+
--- /dev/null
+From 9d5c6d4321db4ed56d305cdc5e173ac6d5f53d42 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 Dec 2023 11:11:28 +0100
+Subject: pipe: wakeup wr_wait after setting max_usage
+
+From: Lukas Schauer <lukas@schauer.dev>
+
+[ Upstream commit e95aada4cb93d42e25c30a0ef9eb2923d9711d4a ]
+
+Commit c73be61cede5 ("pipe: Add general notification queue support")
+introduced a regression that would lock up resized pipes under certain
+conditions. See the reproducer in [1].
+
+In that commit, resizing of the pipe ring was moved to a different
+function; doing so moved the wakeup of pipe->wr_wait before pipe->max_usage
+is actually raised. If a pipe was full before the resize occurred, the
+wakeup would never actually trigger pipe_write.
+
+Set @max_usage and @nr_accounted before waking writers if this isn't a
+watch queue.
+
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=212295 [1]
+Link: https://lore.kernel.org/r/20231201-orchideen-modewelt-e009de4562c6@brauner
+Fixes: c73be61cede5 ("pipe: Add general notification queue support")
+Reviewed-by: David Howells <dhowells@redhat.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Lukas Schauer <lukas@schauer.dev>
+[Christian Brauner <brauner@kernel.org>: rewrite to account for watch queues]
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/pipe.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/fs/pipe.c b/fs/pipe.c
+index 603ab19b0861..a234035cc375 100644
+--- a/fs/pipe.c
++++ b/fs/pipe.c
+@@ -1305,6 +1305,11 @@ int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
+ pipe->tail = tail;
+ pipe->head = head;
+
++ if (!pipe_has_watch_queue(pipe)) {
++ pipe->max_usage = nr_slots;
++ pipe->nr_accounted = nr_slots;
++ }
++
+ spin_unlock_irq(&pipe->rd_wait.lock);
+
+ /* This might have made more room for writers */
+@@ -1356,8 +1361,6 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned int arg)
+ if (ret < 0)
+ goto out_revert_acct;
+
+- pipe->max_usage = nr_slots;
+- pipe->nr_accounted = nr_slots;
+ return pipe->max_usage * PAGE_SIZE;
+
+ out_revert_acct:
+--
+2.43.0
+
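A sketch of why the ordering matters, under the assumption (from my reading of pipe_write()) that a blocked writer re-checks fullness against pipe->max_usage once woken; the helper below is illustrative only.

/* Illustration only: the condition a sleeping writer effectively re-evaluates.
 * If wr_wait is woken while max_usage still holds the old, smaller limit, a
 * previously full pipe still looks full and the writer goes back to sleep,
 * which is the lock-up the fix above avoids.
 */
static bool writer_sees_room(const struct pipe_inode_info *pipe)
{
	return !pipe_full(pipe->head, pipe->tail, pipe->max_usage);
}
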
--- /dev/null
+From 61854d1ea27ed45afb9d284a6fcea30adbdd0d7f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Sep 2023 20:43:18 +0206
+Subject: serial: core: Provide port lock wrappers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+[ Upstream commit b0af4bcb49464c221ad5f95d40f2b1b252ceedcc ]
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure, which aims to
+support threaded and atomic consoles, requires marking sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+Provide wrapper functions for spin_[un]lock*(port->lock) invocations so
+that the console mechanics can be applied later on at a single place and
+do not require copying the same logic all over the drivers.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-2-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: 9915753037eb ("serial: sc16is7xx: fix unconditional activation of THRI interrupt")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/serial_core.h | 79 +++++++++++++++++++++++++++++++++++++
+ 1 file changed, 79 insertions(+)
+
+diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
+index bb6f073bc159..f1d5c0d1568c 100644
+--- a/include/linux/serial_core.h
++++ b/include/linux/serial_core.h
+@@ -588,6 +588,85 @@ struct uart_port {
+ void *private_data; /* generic platform data pointer */
+ };
+
++/**
++ * uart_port_lock - Lock the UART port
++ * @up: Pointer to UART port structure
++ */
++static inline void uart_port_lock(struct uart_port *up)
++{
++ spin_lock(&up->lock);
++}
++
++/**
++ * uart_port_lock_irq - Lock the UART port and disable interrupts
++ * @up: Pointer to UART port structure
++ */
++static inline void uart_port_lock_irq(struct uart_port *up)
++{
++ spin_lock_irq(&up->lock);
++}
++
++/**
++ * uart_port_lock_irqsave - Lock the UART port, save and disable interrupts
++ * @up: Pointer to UART port structure
++ * @flags: Pointer to interrupt flags storage
++ */
++static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
++{
++ spin_lock_irqsave(&up->lock, *flags);
++}
++
++/**
++ * uart_port_trylock - Try to lock the UART port
++ * @up: Pointer to UART port structure
++ *
++ * Returns: True if lock was acquired, false otherwise
++ */
++static inline bool uart_port_trylock(struct uart_port *up)
++{
++ return spin_trylock(&up->lock);
++}
++
++/**
++ * uart_port_trylock_irqsave - Try to lock the UART port, save and disable interrupts
++ * @up: Pointer to UART port structure
++ * @flags: Pointer to interrupt flags storage
++ *
++ * Returns: True if lock was acquired, false otherwise
++ */
++static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long *flags)
++{
++ return spin_trylock_irqsave(&up->lock, *flags);
++}
++
++/**
++ * uart_port_unlock - Unlock the UART port
++ * @up: Pointer to UART port structure
++ */
++static inline void uart_port_unlock(struct uart_port *up)
++{
++ spin_unlock(&up->lock);
++}
++
++/**
++ * uart_port_unlock_irq - Unlock the UART port and re-enable interrupts
++ * @up: Pointer to UART port structure
++ */
++static inline void uart_port_unlock_irq(struct uart_port *up)
++{
++ spin_unlock_irq(&up->lock);
++}
++
++/**
++ * uart_port_unlock_irqrestore - Unlock the UART port, restore interrupts
++ * @up: Pointer to UART port structure
++ * @flags: The saved interrupt flags for restore
++ */
++static inline void uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
++{
++ spin_unlock_irqrestore(&up->lock, flags);
++}
++
+ static inline int serial_port_in(struct uart_port *up, int offset)
+ {
+ return up->serial_in(up, offset);
+--
+2.43.0
+
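A minimal usage sketch of the wrappers added above, assuming a hypothetical
driver (foo_uart_irq and its interrupt wiring are illustrative only, not part
of this series): callers swap direct spinlock operations on port->lock for the
uart_port_*() helpers.

    #include <linux/interrupt.h>
    #include <linux/serial_core.h>

    /* Hypothetical interrupt handler: takes the port lock through the
     * wrappers instead of operating on port->lock directly.
     */
    static irqreturn_t foo_uart_irq(int irq, void *dev_id)
    {
            struct uart_port *port = dev_id;
            unsigned long flags;

            uart_port_lock_irqsave(port, &flags);
            /* read IIR, drain the RX FIFO, refill the TX FIFO ... */
            uart_port_unlock_irqrestore(port, flags);

            return IRQ_HANDLED;
    }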
--- /dev/null
+From 7ed0b06857b773a12e579c757d6e9aff74b2ab39 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 11 Dec 2023 12:13:53 -0500
+Subject: serial: sc16is7xx: fix unconditional activation of THRI interrupt
+
+From: Hugo Villeneuve <hvilleneuve@dimonoff.com>
+
+[ Upstream commit 9915753037eba7135b209fef4f2afeca841af816 ]
+
+Commit cc4c1d05eb10 ("sc16is7xx: Properly resume TX after stop") changed
+behavior to unconditionally set the THRI interrupt in sc16is7xx_tx_proc().
+
+For example when sending a 65 bytes message, and assuming the Tx FIFO is
+initially empty, sc16is7xx_handle_tx() will write the first 64 bytes of the
+message to the FIFO and sc16is7xx_tx_proc() will then activate THRI. When
+the THRI IRQ is fired, the driver will write the remaining byte of the
+message to the FIFO, and disable THRI by calling sc16is7xx_stop_tx().
+
+When sending a 2 bytes message, sc16is7xx_handle_tx() will write the 2
+bytes of the message to the FIFO and call sc16is7xx_stop_tx(), disabling
+THRI. After sc16is7xx_handle_tx() exits, control returns to
+sc16is7xx_tx_proc() which will unconditionally set THRI. When the THRI IRQ
+is fired, the driver simply acknowledges the interrupt and does nothing
+more, since all the data has already been written to the FIFO. This results
+in 2 register writes and 4 register reads all for nothing and taking
+precious cycles from the I2C/SPI bus.
+
+Fix this by enabling the THRI interrupt only when we fill the Tx FIFO to
+its maximum capacity and there are remaining bytes to send in the message.
+
+Fixes: cc4c1d05eb10 ("sc16is7xx: Properly resume TX after stop")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Hugo Villeneuve <hvilleneuve@dimonoff.com>
+Link: https://lore.kernel.org/r/20231211171353.2901416-7-hugo@hugovil.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/tty/serial/sc16is7xx.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index 425093ce3f24..f75b8bceb8ca 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -676,6 +676,8 @@ static void sc16is7xx_handle_tx(struct uart_port *port)
+
+ if (uart_circ_empty(xmit))
+ sc16is7xx_stop_tx(port);
++ else
++ sc16is7xx_ier_set(port, SC16IS7XX_IER_THRI_BIT);
+ uart_port_unlock_irqrestore(port, flags);
+ }
+
+@@ -802,7 +804,6 @@ static void sc16is7xx_tx_proc(struct kthread_work *ws)
+ {
+ struct uart_port *port = &(to_sc16is7xx_one(ws, tx_work)->port);
+ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+- unsigned long flags;
+
+ if ((port->rs485.flags & SER_RS485_ENABLED) &&
+ (port->rs485.delay_rts_before_send > 0))
+@@ -811,10 +812,6 @@ static void sc16is7xx_tx_proc(struct kthread_work *ws)
+ mutex_lock(&one->efr_lock);
+ sc16is7xx_handle_tx(port);
+ mutex_unlock(&one->efr_lock);
+-
+- uart_port_lock_irqsave(port, &flags);
+- sc16is7xx_ier_set(port, SC16IS7XX_IER_THRI_BIT);
+- uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static void sc16is7xx_reconf_rs485(struct uart_port *port)
+--
+2.43.0
+
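For readability, the two hunks above combine to a TX path of roughly the
following shape (abridged reconstruction, declarations omitted; not a verbatim
copy of the driver source): THRI is re-armed only when bytes remain queued
after the FIFO write, and sc16is7xx_tx_proc() no longer touches IER at all.

    /* ... sc16is7xx_handle_tx() writes up to one FIFO's worth of bytes ... */

    uart_port_lock_irqsave(port, &flags);
    if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
            uart_write_wakeup(port);

    if (uart_circ_empty(xmit))
            sc16is7xx_stop_tx(port);        /* all data queued; stop_tx() clears THRI */
    else
            sc16is7xx_ier_set(port, SC16IS7XX_IER_THRI_BIT);  /* more bytes pending */
    uart_port_unlock_irqrestore(port, flags);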
--- /dev/null
+From 4dfac479a73efc5b3696901ba88061c3f5e38e26 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Sep 2023 20:44:12 +0206
+Subject: serial: sc16is7xx: Use port lock wrappers
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+[ Upstream commit b465848be8a652e2c5fefe102661fb660cff8497 ]
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure, which aims
+to support threaded and atomic consoles, requires marking sections that
+modify the UART registers as unsafe. This allows the atomic write
+function to make informed decisions and eventually restore operational
+state. It also makes it possible to prevent the regular UART code from
+modifying UART registers while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-56-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: 9915753037eb ("serial: sc16is7xx: fix unconditional activation of THRI interrupt")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/tty/serial/sc16is7xx.c | 40 +++++++++++++++++-----------------
+ 1 file changed, 20 insertions(+), 20 deletions(-)
+
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index 3276d21ec1c6..425093ce3f24 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -642,9 +642,9 @@ static void sc16is7xx_handle_tx(struct uart_port *port)
+ }
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ sc16is7xx_stop_tx(port);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ return;
+ }
+
+@@ -670,13 +670,13 @@ static void sc16is7xx_handle_tx(struct uart_port *port)
+ sc16is7xx_fifo_write(port, to_send);
+ }
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (uart_circ_empty(xmit))
+ sc16is7xx_stop_tx(port);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static unsigned int sc16is7xx_get_hwmctrl(struct uart_port *port)
+@@ -707,7 +707,7 @@ static void sc16is7xx_update_mlines(struct sc16is7xx_one *one)
+
+ one->old_mctrl = status;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ if ((changed & TIOCM_RNG) && (status & TIOCM_RNG))
+ port->icount.rng++;
+ if (changed & TIOCM_DSR)
+@@ -718,7 +718,7 @@ static void sc16is7xx_update_mlines(struct sc16is7xx_one *one)
+ uart_handle_cts_change(port, status & TIOCM_CTS);
+
+ wake_up_interruptible(&port->state->port.delta_msr_wait);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static bool sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
+@@ -812,9 +812,9 @@ static void sc16is7xx_tx_proc(struct kthread_work *ws)
+ sc16is7xx_handle_tx(port);
+ mutex_unlock(&one->efr_lock);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ sc16is7xx_ier_set(port, SC16IS7XX_IER_THRI_BIT);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static void sc16is7xx_reconf_rs485(struct uart_port *port)
+@@ -825,14 +825,14 @@ static void sc16is7xx_reconf_rs485(struct uart_port *port)
+ struct serial_rs485 *rs485 = &port->rs485;
+ unsigned long irqflags;
+
+- spin_lock_irqsave(&port->lock, irqflags);
++ uart_port_lock_irqsave(port, &irqflags);
+ if (rs485->flags & SER_RS485_ENABLED) {
+ efcr |= SC16IS7XX_EFCR_AUTO_RS485_BIT;
+
+ if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
+ efcr |= SC16IS7XX_EFCR_RTS_INVERT_BIT;
+ }
+- spin_unlock_irqrestore(&port->lock, irqflags);
++ uart_port_unlock_irqrestore(port, irqflags);
+
+ sc16is7xx_port_update(port, SC16IS7XX_EFCR_REG, mask, efcr);
+ }
+@@ -843,10 +843,10 @@ static void sc16is7xx_reg_proc(struct kthread_work *ws)
+ struct sc16is7xx_one_config config;
+ unsigned long irqflags;
+
+- spin_lock_irqsave(&one->port.lock, irqflags);
++ uart_port_lock_irqsave(&one->port, &irqflags);
+ config = one->config;
+ memset(&one->config, 0, sizeof(one->config));
+- spin_unlock_irqrestore(&one->port.lock, irqflags);
++ uart_port_unlock_irqrestore(&one->port, irqflags);
+
+ if (config.flags & SC16IS7XX_RECONF_MD) {
+ u8 mcr = 0;
+@@ -952,18 +952,18 @@ static void sc16is7xx_throttle(struct uart_port *port)
+ * value set in MCR register. Stop reading data from RX FIFO so the
+ * AutoRTS feature will de-activate RTS output.
+ */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ sc16is7xx_ier_clear(port, SC16IS7XX_IER_RDI_BIT);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static void sc16is7xx_unthrottle(struct uart_port *port)
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ sc16is7xx_ier_set(port, SC16IS7XX_IER_RDI_BIT);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static unsigned int sc16is7xx_tx_empty(struct uart_port *port)
+@@ -1101,7 +1101,7 @@ static void sc16is7xx_set_termios(struct uart_port *port,
+ /* Setup baudrate generator */
+ baud = sc16is7xx_set_baud(port, baud);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* Update timeout according to new baud rate */
+ uart_update_timeout(port, termios->c_cflag, baud);
+@@ -1109,7 +1109,7 @@ static void sc16is7xx_set_termios(struct uart_port *port,
+ if (UART_ENABLE_MS(port, termios->c_cflag))
+ sc16is7xx_enable_ms(port);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static int sc16is7xx_config_rs485(struct uart_port *port, struct ktermios *termios,
+@@ -1195,9 +1195,9 @@ static int sc16is7xx_startup(struct uart_port *port)
+ sc16is7xx_port_write(port, SC16IS7XX_IER_REG, val);
+
+ /* Enable modem status polling */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ sc16is7xx_enable_ms(port);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ return 0;
+ }
+--
+2.43.0
+
drm-amdgpu-pm-fix-the-power-source-flag-error.patch
drm-amd-display-fix-uninitialized-variable-usage-in-core_link_-read_dpcd-write_dpcd-functions.patch
net-bpf-avoid-unused-sin_addr_len-warning-when-config_cgroup_bpf-is-not-set.patch
+thermal-intel-hfi-refactor-enabling-code-into-helper.patch
+thermal-intel-hfi-disable-an-hfi-instance-when-all-i.patch
+thermal-intel-hfi-add-syscore-callbacks-for-system-w.patch
+fs-pipe-move-check-to-pipe_has_watch_queue.patch
+pipe-wakeup-wr_wait-after-setting-max_usage.patch
+media-v4l-cci-include-linux-bits.h.patch
+media-v4l-cci-add-macros-to-obtain-register-width-an.patch
+media-v4l2-cci-add-support-for-little-endian-encoded.patch
+media-i2c-imx290-properly-encode-registers-as-little.patch
+thermal-trip-drop-redundant-trips-check-from-for_eac.patch
+thermal-core-store-trip-pointer-in-struct-thermal_in.patch
+thermal-gov_power_allocator-avoid-inability-to-reset.patch
+mm-migrate-record-the-mlocked-page-status-to-remove-.patch
+mm-migrate-fix-getting-incorrect-page-mapping-during.patch
+serial-core-provide-port-lock-wrappers.patch
+serial-sc16is7xx-use-port-lock-wrappers.patch
+serial-sc16is7xx-fix-unconditional-activation-of-thr.patch
+btrfs-zoned-factor-out-prepare_allocation_zoned.patch
+btrfs-zoned-optimize-hint-byte-for-zoned-allocator.patch
+drm-i915-lnl-remove-watchdog-timers-for-psr.patch
+drm-i915-psr-only-allow-psr-in-lpsp-mode-on-hsw-non-.patch
+drm-panel-edp-add-auo-b116xtn02-boe-nt116whm-n21-836.patch
+drm-panel-edp-drm-panel-edp-fix-auo-b116xak01-name-a.patch
+drm-panel-edp-drm-panel-edp-fix-auo-b116xtn02-name.patch
+drm-amdgpu-gfx10-set-unord_dispatch-in-compute-mqds.patch
+drm-amdgpu-gfx11-set-unord_dispatch-in-compute-mqds.patch
--- /dev/null
+From aae79a1b632f28c18f975f2dee9e5e0af28a8f04 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 Sep 2023 19:52:44 +0200
+Subject: thermal: core: Store trip pointer in struct thermal_instance
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+[ Upstream commit 2c7b4bfadef08cc0995c24a7b9eb120fe897165f ]
+
+Replace the integer trip number stored in struct thermal_instance with
+a pointer to the relevant trip and adjust the code using the structure
+in question accordingly.
+
+The main reason for making this change is to allow the trip point to
+cooling device binding code to be more straightforward, as illustrated by
+subsequent modifications of the ACPI thermal driver, but it also helps
+to clarify the overall design and allows the governor code overhead to
+be reduced (through subsequent modifications).
+
+The only case in which it adds complexity is trip_point_show(), which
+needs to walk the trips[] table to find the index of the given trip
+point, but this is not a critical path and the interface that
+trip_point_show() belongs to is problematic anyway (for instance, it
+doesn't cover the case when the same cooling device is associated
+with multiple trip points).
+
+This is a preliminary change and the affected code will be refined by
+a series of subsequent modifications of thermal governors, the core and
+the ACPI thermal driver.
+
+The general functionality is not expected to be affected by this change.
+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Reviewed-by: Daniel Lezcano <daniel.lezcano@linaro.org>
+Stable-dep-of: e95fa7404716 ("thermal: gov_power_allocator: avoid inability to reset a cdev")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/thermal/gov_bang_bang.c | 23 ++++++++---------------
+ drivers/thermal/gov_fair_share.c | 5 +++--
+ drivers/thermal/gov_power_allocator.c | 11 ++++++++---
+ drivers/thermal/gov_step_wise.c | 16 +++++++---------
+ drivers/thermal/thermal_core.c | 15 ++++++++++-----
+ drivers/thermal/thermal_core.h | 4 +++-
+ drivers/thermal/thermal_helpers.c | 5 ++++-
+ drivers/thermal/thermal_sysfs.c | 3 ++-
+ drivers/thermal/thermal_trip.c | 15 +++++++++++++++
+ 9 files changed, 60 insertions(+), 37 deletions(-)
+
+diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c
+index 1b121066521f..49cdfaa3a927 100644
+--- a/drivers/thermal/gov_bang_bang.c
++++ b/drivers/thermal/gov_bang_bang.c
+@@ -13,28 +13,21 @@
+
+ #include "thermal_core.h"
+
+-static int thermal_zone_trip_update(struct thermal_zone_device *tz, int trip_id)
++static int thermal_zone_trip_update(struct thermal_zone_device *tz, int trip_index)
+ {
+- struct thermal_trip trip;
++ const struct thermal_trip *trip = &tz->trips[trip_index];
+ struct thermal_instance *instance;
+- int ret;
+-
+- ret = __thermal_zone_get_trip(tz, trip_id, &trip);
+- if (ret) {
+- pr_warn_once("Failed to retrieve trip point %d\n", trip_id);
+- return ret;
+- }
+
+- if (!trip.hysteresis)
++ if (!trip->hysteresis)
+ dev_info_once(&tz->device,
+ "Zero hysteresis value for thermal zone %s\n", tz->type);
+
+ dev_dbg(&tz->device, "Trip%d[temp=%d]:temp=%d:hyst=%d\n",
+- trip_id, trip.temperature, tz->temperature,
+- trip.hysteresis);
++ trip_index, trip->temperature, tz->temperature,
++ trip->hysteresis);
+
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+- if (instance->trip != trip_id)
++ if (instance->trip != trip)
+ continue;
+
+ /* in case fan is in initial state, switch the fan off */
+@@ -52,10 +45,10 @@ static int thermal_zone_trip_update(struct thermal_zone_device *tz, int trip_id)
+ * enable fan when temperature exceeds trip_temp and disable
+ * the fan in case it falls below trip_temp minus hysteresis
+ */
+- if (instance->target == 0 && tz->temperature >= trip.temperature)
++ if (instance->target == 0 && tz->temperature >= trip->temperature)
+ instance->target = 1;
+ else if (instance->target == 1 &&
+- tz->temperature <= trip.temperature - trip.hysteresis)
++ tz->temperature <= trip->temperature - trip->hysteresis)
+ instance->target = 0;
+
+ dev_dbg(&instance->cdev->device, "target=%d\n",
+diff --git a/drivers/thermal/gov_fair_share.c b/drivers/thermal/gov_fair_share.c
+index 03c2daeb6ee8..2abeb8979f50 100644
+--- a/drivers/thermal/gov_fair_share.c
++++ b/drivers/thermal/gov_fair_share.c
+@@ -49,7 +49,7 @@ static long get_target_state(struct thermal_zone_device *tz,
+ /**
+ * fair_share_throttle - throttles devices associated with the given zone
+ * @tz: thermal_zone_device
+- * @trip: trip point index
++ * @trip_index: trip point index
+ *
+ * Throttling Logic: This uses three parameters to calculate the new
+ * throttle state of the cooling devices associated with the given zone.
+@@ -65,8 +65,9 @@ static long get_target_state(struct thermal_zone_device *tz,
+ * (Heavily assumes the trip points are in ascending order)
+ * new_state of cooling device = P3 * P2 * P1
+ */
+-static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
++static int fair_share_throttle(struct thermal_zone_device *tz, int trip_index)
+ {
++ const struct thermal_trip *trip = &tz->trips[trip_index];
+ struct thermal_instance *instance;
+ int total_weight = 0;
+ int total_instance = 0;
+diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c
+index 8642f1096b91..1faf55446ba2 100644
+--- a/drivers/thermal/gov_power_allocator.c
++++ b/drivers/thermal/gov_power_allocator.c
+@@ -90,12 +90,14 @@ static u32 estimate_sustainable_power(struct thermal_zone_device *tz)
+ u32 sustainable_power = 0;
+ struct thermal_instance *instance;
+ struct power_allocator_params *params = tz->governor_data;
++ const struct thermal_trip *trip_max_desired_temperature =
++ &tz->trips[params->trip_max_desired_temperature];
+
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ struct thermal_cooling_device *cdev = instance->cdev;
+ u32 min_power;
+
+- if (instance->trip != params->trip_max_desired_temperature)
++ if (instance->trip != trip_max_desired_temperature)
+ continue;
+
+ if (!cdev_is_power_actor(cdev))
+@@ -383,12 +385,13 @@ static int allocate_power(struct thermal_zone_device *tz,
+ {
+ struct thermal_instance *instance;
+ struct power_allocator_params *params = tz->governor_data;
++ const struct thermal_trip *trip_max_desired_temperature =
++ &tz->trips[params->trip_max_desired_temperature];
+ u32 *req_power, *max_power, *granted_power, *extra_actor_power;
+ u32 *weighted_req_power;
+ u32 total_req_power, max_allocatable_power, total_weighted_req_power;
+ u32 total_granted_power, power_range;
+ int i, num_actors, total_weight, ret = 0;
+- int trip_max_desired_temperature = params->trip_max_desired_temperature;
+
+ num_actors = 0;
+ total_weight = 0;
+@@ -564,12 +567,14 @@ static void allow_maximum_power(struct thermal_zone_device *tz, bool update)
+ {
+ struct thermal_instance *instance;
+ struct power_allocator_params *params = tz->governor_data;
++ const struct thermal_trip *trip_max_desired_temperature =
++ &tz->trips[params->trip_max_desired_temperature];
+ u32 req_power;
+
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ struct thermal_cooling_device *cdev = instance->cdev;
+
+- if ((instance->trip != params->trip_max_desired_temperature) ||
++ if ((instance->trip != trip_max_desired_temperature) ||
+ (!cdev_is_power_actor(instance->cdev)))
+ continue;
+
+diff --git a/drivers/thermal/gov_step_wise.c b/drivers/thermal/gov_step_wise.c
+index 1050fb4d94c2..849dc1ec8d27 100644
+--- a/drivers/thermal/gov_step_wise.c
++++ b/drivers/thermal/gov_step_wise.c
+@@ -81,26 +81,24 @@ static void update_passive_instance(struct thermal_zone_device *tz,
+
+ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip_id)
+ {
++ const struct thermal_trip *trip = &tz->trips[trip_id];
+ enum thermal_trend trend;
+ struct thermal_instance *instance;
+- struct thermal_trip trip;
+ bool throttle = false;
+ int old_target;
+
+- __thermal_zone_get_trip(tz, trip_id, &trip);
+-
+ trend = get_tz_trend(tz, trip_id);
+
+- if (tz->temperature >= trip.temperature) {
++ if (tz->temperature >= trip->temperature) {
+ throttle = true;
+- trace_thermal_zone_trip(tz, trip_id, trip.type);
++ trace_thermal_zone_trip(tz, trip_id, trip->type);
+ }
+
+ dev_dbg(&tz->device, "Trip%d[type=%d,temp=%d]:trend=%d,throttle=%d\n",
+- trip_id, trip.type, trip.temperature, trend, throttle);
++ trip_id, trip->type, trip->temperature, trend, throttle);
+
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+- if (instance->trip != trip_id)
++ if (instance->trip != trip)
+ continue;
+
+ old_target = instance->target;
+@@ -114,11 +112,11 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip_id
+ /* Activate a passive thermal instance */
+ if (old_target == THERMAL_NO_TARGET &&
+ instance->target != THERMAL_NO_TARGET)
+- update_passive_instance(tz, trip.type, 1);
++ update_passive_instance(tz, trip->type, 1);
+ /* Deactivate a passive thermal instance */
+ else if (old_target != THERMAL_NO_TARGET &&
+ instance->target == THERMAL_NO_TARGET)
+- update_passive_instance(tz, trip.type, -1);
++ update_passive_instance(tz, trip->type, -1);
+
+ instance->initialized = true;
+ mutex_lock(&instance->cdev->lock);
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index 2de524fb7be5..1494ffa59754 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -606,7 +606,7 @@ struct thermal_zone_device *thermal_zone_get_by_id(int id)
+ /**
+ * thermal_zone_bind_cooling_device() - bind a cooling device to a thermal zone
+ * @tz: pointer to struct thermal_zone_device
+- * @trip: indicates which trip point the cooling devices is
++ * @trip_index: indicates which trip point the cooling devices is
+ * associated with in this thermal zone.
+ * @cdev: pointer to struct thermal_cooling_device
+ * @upper: the Maximum cooling state for this trip point.
+@@ -626,7 +626,7 @@ struct thermal_zone_device *thermal_zone_get_by_id(int id)
+ * Return: 0 on success, the proper error value otherwise.
+ */
+ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
+- int trip,
++ int trip_index,
+ struct thermal_cooling_device *cdev,
+ unsigned long upper, unsigned long lower,
+ unsigned int weight)
+@@ -635,12 +635,15 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
+ struct thermal_instance *pos;
+ struct thermal_zone_device *pos1;
+ struct thermal_cooling_device *pos2;
++ const struct thermal_trip *trip;
+ bool upper_no_limit;
+ int result;
+
+- if (trip >= tz->num_trips || trip < 0)
++ if (trip_index >= tz->num_trips || trip_index < 0)
+ return -EINVAL;
+
++ trip = &tz->trips[trip_index];
++
+ list_for_each_entry(pos1, &thermal_tz_list, node) {
+ if (pos1 == tz)
+ break;
+@@ -745,7 +748,7 @@ EXPORT_SYMBOL_GPL(thermal_zone_bind_cooling_device);
+ * thermal_zone_unbind_cooling_device() - unbind a cooling device from a
+ * thermal zone.
+ * @tz: pointer to a struct thermal_zone_device.
+- * @trip: indicates which trip point the cooling devices is
++ * @trip_index: indicates which trip point the cooling devices is
+ * associated with in this thermal zone.
+ * @cdev: pointer to a struct thermal_cooling_device.
+ *
+@@ -756,13 +759,15 @@ EXPORT_SYMBOL_GPL(thermal_zone_bind_cooling_device);
+ * Return: 0 on success, the proper error value otherwise.
+ */
+ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz,
+- int trip,
++ int trip_index,
+ struct thermal_cooling_device *cdev)
+ {
+ struct thermal_instance *pos, *next;
++ const struct thermal_trip *trip;
+
+ mutex_lock(&tz->lock);
+ mutex_lock(&cdev->lock);
++ trip = &tz->trips[trip_index];
+ list_for_each_entry_safe(pos, next, &tz->thermal_instances, tz_node) {
+ if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) {
+ list_del(&pos->tz_node);
+diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
+index de884bea28b6..024e82ebf592 100644
+--- a/drivers/thermal/thermal_core.h
++++ b/drivers/thermal/thermal_core.h
+@@ -87,7 +87,7 @@ struct thermal_instance {
+ char name[THERMAL_NAME_LENGTH];
+ struct thermal_zone_device *tz;
+ struct thermal_cooling_device *cdev;
+- int trip;
++ const struct thermal_trip *trip;
+ bool initialized;
+ unsigned long upper; /* Highest cooling state for this trip point */
+ unsigned long lower; /* Lowest cooling state for this trip point */
+@@ -119,6 +119,8 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz,
+ void __thermal_zone_set_trips(struct thermal_zone_device *tz);
+ int __thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
+ struct thermal_trip *trip);
++int thermal_zone_trip_id(struct thermal_zone_device *tz,
++ const struct thermal_trip *trip);
+ int __thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp);
+
+ /* sysfs I/F */
+diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c
+index 4d66372c9629..c1d0af73c85d 100644
+--- a/drivers/thermal/thermal_helpers.c
++++ b/drivers/thermal/thermal_helpers.c
+@@ -42,14 +42,17 @@ int get_tz_trend(struct thermal_zone_device *tz, int trip_index)
+
+ struct thermal_instance *
+ get_thermal_instance(struct thermal_zone_device *tz,
+- struct thermal_cooling_device *cdev, int trip)
++ struct thermal_cooling_device *cdev, int trip_index)
+ {
+ struct thermal_instance *pos = NULL;
+ struct thermal_instance *target_instance = NULL;
++ const struct thermal_trip *trip;
+
+ mutex_lock(&tz->lock);
+ mutex_lock(&cdev->lock);
+
++ trip = &tz->trips[trip_index];
++
+ list_for_each_entry(pos, &tz->thermal_instances, tz_node) {
+ if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) {
+ target_instance = pos;
+diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
+index 4e6a97db894e..eef40d4f3063 100644
+--- a/drivers/thermal/thermal_sysfs.c
++++ b/drivers/thermal/thermal_sysfs.c
+@@ -943,7 +943,8 @@ trip_point_show(struct device *dev, struct device_attribute *attr, char *buf)
+ instance =
+ container_of(attr, struct thermal_instance, attr);
+
+- return sprintf(buf, "%d\n", instance->trip);
++ return sprintf(buf, "%d\n",
++ thermal_zone_trip_id(instance->tz, instance->trip));
+ }
+
+ ssize_t
+diff --git a/drivers/thermal/thermal_trip.c b/drivers/thermal/thermal_trip.c
+index 4b3a9e77c039..6e5cebd1e63a 100644
+--- a/drivers/thermal/thermal_trip.c
++++ b/drivers/thermal/thermal_trip.c
+@@ -172,3 +172,18 @@ int thermal_zone_set_trip(struct thermal_zone_device *tz, int trip_id,
+
+ return 0;
+ }
++
++int thermal_zone_trip_id(struct thermal_zone_device *tz,
++ const struct thermal_trip *trip)
++{
++ int i;
++
++ lockdep_assert_held(&tz->lock);
++
++ for (i = 0; i < tz->num_trips; i++) {
++ if (&tz->trips[i] == trip)
++ return i;
++ }
++
++ return -ENODATA;
++}
+--
+2.43.0
+
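Where code still needs the numeric trip index (sysfs or trace output, for
example), the new thermal_zone_trip_id() helper recovers it from the stored
pointer. A small sketch, under the assumption that tz->lock is already held as
it is in the governor callbacks; the surrounding code is hypothetical:

    /* instance->trip is now a pointer; map it back to an index only
     * where an index is actually required.
     */
    int idx = thermal_zone_trip_id(instance->tz, instance->trip);

    if (idx < 0)
            return;         /* trip does not belong to this zone */

    dev_dbg(&instance->tz->device, "cooling device bound to trip %d\n", idx);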
--- /dev/null
+From b785a8b6ba0dd845ecca179dc7f45ef3802a81a7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Jan 2024 19:55:26 +0800
+Subject: thermal: gov_power_allocator: avoid inability to reset a cdev
+
+From: Di Shen <di.shen@unisoc.com>
+
+[ Upstream commit e95fa7404716f6e25021e66067271a4ad8eb1486 ]
+
+Commit 0952177f2a1f ("thermal/core/power_allocator: Update once
+cooling devices when temp is low") adds an update flag to avoid
+triggering a thermal event when there is no need, and the thermal
+cdev is updated once when the temperature is low.
+
+But when the trips are writable and switch_on_temp is set to a
+higher value, the cooling device state may not be reset to 0,
+because last_temperature is smaller than switch_on_temp.
+
+For example:
+First:
+switch_on_temp=70 control_temp=85;
+Then userspace changes the trip_temp:
+switch_on_temp=45 control_temp=55 cur_temp=54
+
+Then userspace resets the trip_temp:
+switch_on_temp=70 control_temp=85 cur_temp=57 last_temp=54
+
+At this time, the cooling device state should be reset to 0.
+However, because cur_temp (57) < switch_on_temp (70) and
+last_temp (54) < switch_on_temp (70), update evaluates to false,
+so the cooling device state cannot be reset.
+
+Using the observation that tz->passive can also be regarded as the
+temperature status, set the update flag to the tz->passive value.
+
+When the temperature drops below switch_on for the first time, the
+states of cooling devices can be reset once, and tz->passive is updated
+to 0. In the next round, because tz->passive is 0, cdev->state will not
+be updated.
+
+By using the tz->passive value as the "update" flag, the issue above
+can be solved, and the cooling devices can be updated only once when the
+temperature is low.
+
+Fixes: 0952177f2a1f ("thermal/core/power_allocator: Update once cooling devices when temp is low")
+Cc: 5.13+ <stable@vger.kernel.org> # 5.13+
+Suggested-by: Wei Wang <wvw@google.com>
+Signed-off-by: Di Shen <di.shen@unisoc.com>
+Reviewed-by: Lukasz Luba <lukasz.luba@arm.com>
+[ rjw: Subject and changelog edits ]
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/thermal/gov_power_allocator.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c
+index 1faf55446ba2..fc969642f70b 100644
+--- a/drivers/thermal/gov_power_allocator.c
++++ b/drivers/thermal/gov_power_allocator.c
+@@ -715,7 +715,7 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip_id)
+
+ ret = __thermal_zone_get_trip(tz, params->trip_switch_on, &trip);
+ if (!ret && (tz->temperature < trip.temperature)) {
+- update = (tz->last_temperature >= trip.temperature);
++ update = tz->passive;
+ tz->passive = 0;
+ reset_pid_controller(params);
+ allow_maximum_power(tz, update);
+--
+2.43.0
+
--- /dev/null
+From 3d6444d9d1821b6a9ab4588d3f49297dd1159b41 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 9 Jan 2024 19:07:04 -0800
+Subject: thermal: intel: hfi: Add syscore callbacks for system-wide PM
+
+From: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
+
+[ Upstream commit 97566d09fd02d2ab329774bb89a2cdf2267e86d9 ]
+
+The kernel allocates a memory buffer and provides its location to the
+hardware, which uses it to update the HFI table. This allocation occurs
+during boot and remains constant throughout runtime.
+
+When resuming from hibernation, the restore kernel allocates a second
+memory buffer and reprograms the HFI hardware with the new location as
+part of a normal boot. The location of the second memory buffer may
+differ from the one allocated by the image kernel.
+
+When the restore kernel transfers control to the image kernel, its HFI
+buffer becomes invalid, potentially leading to memory corruption if the
+hardware writes to it (the hardware continues to use the buffer from the
+restore kernel).
+
+It is also possible that the hardware "forgets" the address of the memory
+buffer when resuming from "deep" suspend. Memory corruption may also occur
+in such a scenario.
+
+To prevent the described memory corruption, disable HFI when preparing to
+suspend or hibernate. Enable it when resuming.
+
+Add syscore callbacks to handle the package of the boot CPU (packages of
+non-boot CPUs are handled via CPU offline). Syscore ops always run on the
+boot CPU. Additionally, HFI only needs to be disabled during "deep" suspend
+and hibernation. Syscore ops only run in these cases.
+
+Cc: 6.1+ <stable@vger.kernel.org> # 6.1+
+Signed-off-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
+[ rjw: Comment adjustment, subject and changelog edits ]
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/thermal/intel/intel_hfi.c | 28 ++++++++++++++++++++++++++++
+ 1 file changed, 28 insertions(+)
+
+diff --git a/drivers/thermal/intel/intel_hfi.c b/drivers/thermal/intel/intel_hfi.c
+index bb25c75acd45..1c5a429b2e3e 100644
+--- a/drivers/thermal/intel/intel_hfi.c
++++ b/drivers/thermal/intel/intel_hfi.c
+@@ -35,7 +35,9 @@
+ #include <linux/processor.h>
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
++#include <linux/suspend.h>
+ #include <linux/string.h>
++#include <linux/syscore_ops.h>
+ #include <linux/topology.h>
+ #include <linux/workqueue.h>
+
+@@ -568,6 +570,30 @@ static __init int hfi_parse_features(void)
+ return 0;
+ }
+
++static void hfi_do_enable(void)
++{
++ /* This code runs only on the boot CPU. */
++ struct hfi_cpu_info *info = &per_cpu(hfi_cpu_info, 0);
++ struct hfi_instance *hfi_instance = info->hfi_instance;
++
++ /* No locking needed. There is no concurrency with CPU online. */
++ hfi_set_hw_table(hfi_instance);
++ hfi_enable();
++}
++
++static int hfi_do_disable(void)
++{
++ /* No locking needed. There is no concurrency with CPU offline. */
++ hfi_disable();
++
++ return 0;
++}
++
++static struct syscore_ops hfi_pm_ops = {
++ .resume = hfi_do_enable,
++ .suspend = hfi_do_disable,
++};
++
+ void __init intel_hfi_init(void)
+ {
+ struct hfi_instance *hfi_instance;
+@@ -599,6 +625,8 @@ void __init intel_hfi_init(void)
+ if (!hfi_updates_wq)
+ goto err_nomem;
+
++ register_syscore_ops(&hfi_pm_ops);
++
+ return;
+
+ err_nomem:
+--
+2.43.0
+
--- /dev/null
+From 8266ca6c5688129b63cf0f23cee9a08521e5eb46 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 2 Jan 2024 20:14:58 -0800
+Subject: thermal: intel: hfi: Disable an HFI instance when all its CPUs go
+ offline
+
+From: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
+
+[ Upstream commit 1c53081d773c2cb4461636559b0d55b46559ceec ]
+
+In preparation for supporting hibernation, add functionality to disable an
+HFI instance during CPU offline. The last CPU of an instance that goes
+offline will disable that instance.
+
+The Intel Software Development Manual states that the operating system must
+wait for the hardware to set MSR_IA32_PACKAGE_THERM_STATUS[26] after
+disabling an HFI instance to ensure that it will no longer write to the HFI
+memory. Some processors, however, never set that bit. Wait a minimum of
+2ms to give the hardware time to complete any pending memory writes.
+
+Signed-off-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Stable-dep-of: 97566d09fd02 ("thermal: intel: hfi: Add syscore callbacks for system-wide PM")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/thermal/intel/intel_hfi.c | 35 +++++++++++++++++++++++++++++++
+ 1 file changed, 35 insertions(+)
+
+diff --git a/drivers/thermal/intel/intel_hfi.c b/drivers/thermal/intel/intel_hfi.c
+index 820613e293cd..bb25c75acd45 100644
+--- a/drivers/thermal/intel/intel_hfi.c
++++ b/drivers/thermal/intel/intel_hfi.c
+@@ -24,6 +24,7 @@
+ #include <linux/bitops.h>
+ #include <linux/cpufeature.h>
+ #include <linux/cpumask.h>
++#include <linux/delay.h>
+ #include <linux/gfp.h>
+ #include <linux/io.h>
+ #include <linux/kernel.h>
+@@ -367,6 +368,32 @@ static void hfi_set_hw_table(struct hfi_instance *hfi_instance)
+ wrmsrl(MSR_IA32_HW_FEEDBACK_PTR, msr_val);
+ }
+
++/* Caller must hold hfi_instance_lock. */
++static void hfi_disable(void)
++{
++ u64 msr_val;
++ int i;
++
++ rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
++ msr_val &= ~HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT;
++ wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
++
++ /*
++ * Wait for hardware to acknowledge the disabling of HFI. Some
++ * processors may not do it. Wait for ~2ms. This is a reasonable
++ * time for hardware to complete any pending actions on the HFI
++ * memory.
++ */
++ for (i = 0; i < 2000; i++) {
++ rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
++ if (msr_val & PACKAGE_THERM_STATUS_HFI_UPDATED)
++ break;
++
++ udelay(1);
++ cpu_relax();
++ }
++}
++
+ /**
+ * intel_hfi_online() - Enable HFI on @cpu
+ * @cpu: CPU in which the HFI will be enabled
+@@ -421,6 +448,10 @@ void intel_hfi_online(unsigned int cpu)
+ /*
+ * Hardware is programmed with the physical address of the first page
+ * frame of the table. Hence, the allocated memory must be page-aligned.
++ *
++ * Some processors do not forget the initial address of the HFI table
++ * even after having been reprogrammed. Keep using the same pages. Do
++ * not free them.
+ */
+ hfi_instance->hw_table = alloc_pages_exact(hfi_features.nr_table_pages,
+ GFP_KERNEL | __GFP_ZERO);
+@@ -485,6 +516,10 @@ void intel_hfi_offline(unsigned int cpu)
+
+ mutex_lock(&hfi_instance_lock);
+ cpumask_clear_cpu(cpu, hfi_instance->cpus);
++
++ if (!cpumask_weight(hfi_instance->cpus))
++ hfi_disable();
++
+ mutex_unlock(&hfi_instance_lock);
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 804c20c11ea528ae9975da227bf5227180a085bc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 2 Jan 2024 20:14:56 -0800
+Subject: thermal: intel: hfi: Refactor enabling code into helper functions
+
+From: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
+
+[ Upstream commit 8a8b6bb93c704776c4b05cb517c3fa8baffb72f5 ]
+
+In preparation for the addition of a suspend notifier, wrap the logic to
+enable HFI and program its memory buffer into helper functions. Both the
+CPU hotplug callback and the suspend notifier will use them.
+
+This refactoring does not introduce functional changes.
+
+Signed-off-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Stable-dep-of: 97566d09fd02 ("thermal: intel: hfi: Add syscore callbacks for system-wide PM")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/thermal/intel/intel_hfi.c | 43 ++++++++++++++++---------------
+ 1 file changed, 22 insertions(+), 21 deletions(-)
+
+diff --git a/drivers/thermal/intel/intel_hfi.c b/drivers/thermal/intel/intel_hfi.c
+index c69db6c90869..820613e293cd 100644
+--- a/drivers/thermal/intel/intel_hfi.c
++++ b/drivers/thermal/intel/intel_hfi.c
+@@ -347,6 +347,26 @@ static void init_hfi_instance(struct hfi_instance *hfi_instance)
+ hfi_instance->data = hfi_instance->hdr + hfi_features.hdr_size;
+ }
+
++/* Caller must hold hfi_instance_lock. */
++static void hfi_enable(void)
++{
++ u64 msr_val;
++
++ rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
++ msr_val |= HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT;
++ wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
++}
++
++static void hfi_set_hw_table(struct hfi_instance *hfi_instance)
++{
++ phys_addr_t hw_table_pa;
++ u64 msr_val;
++
++ hw_table_pa = virt_to_phys(hfi_instance->hw_table);
++ msr_val = hw_table_pa | HW_FEEDBACK_PTR_VALID_BIT;
++ wrmsrl(MSR_IA32_HW_FEEDBACK_PTR, msr_val);
++}
++
+ /**
+ * intel_hfi_online() - Enable HFI on @cpu
+ * @cpu: CPU in which the HFI will be enabled
+@@ -364,8 +384,6 @@ void intel_hfi_online(unsigned int cpu)
+ {
+ struct hfi_instance *hfi_instance;
+ struct hfi_cpu_info *info;
+- phys_addr_t hw_table_pa;
+- u64 msr_val;
+ u16 die_id;
+
+ /* Nothing to do if hfi_instances are missing. */
+@@ -409,8 +427,6 @@ void intel_hfi_online(unsigned int cpu)
+ if (!hfi_instance->hw_table)
+ goto unlock;
+
+- hw_table_pa = virt_to_phys(hfi_instance->hw_table);
+-
+ /*
+ * Allocate memory to keep a local copy of the table that
+ * hardware generates.
+@@ -420,16 +436,6 @@ void intel_hfi_online(unsigned int cpu)
+ if (!hfi_instance->local_table)
+ goto free_hw_table;
+
+- /*
+- * Program the address of the feedback table of this die/package. On
+- * some processors, hardware remembers the old address of the HFI table
+- * even after having been reprogrammed and re-enabled. Thus, do not free
+- * the pages allocated for the table or reprogram the hardware with a
+- * new base address. Namely, program the hardware only once.
+- */
+- msr_val = hw_table_pa | HW_FEEDBACK_PTR_VALID_BIT;
+- wrmsrl(MSR_IA32_HW_FEEDBACK_PTR, msr_val);
+-
+ init_hfi_instance(hfi_instance);
+
+ INIT_DELAYED_WORK(&hfi_instance->update_work, hfi_update_work_fn);
+@@ -438,13 +444,8 @@ void intel_hfi_online(unsigned int cpu)
+
+ cpumask_set_cpu(cpu, hfi_instance->cpus);
+
+- /*
+- * Enable the hardware feedback interface and never disable it. See
+- * comment on programming the address of the table.
+- */
+- rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
+- msr_val |= HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT;
+- wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
++ hfi_set_hw_table(hfi_instance);
++ hfi_enable();
+
+ unlock:
+ mutex_unlock(&hfi_instance_lock);
+--
+2.43.0
+
--- /dev/null
+From 57a3a0e05eee7667cffa717d61b3d92e8005f6ae Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Sep 2023 20:59:53 +0200
+Subject: thermal: trip: Drop redundant trips check from
+ for_each_thermal_trip()
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+[ Upstream commit a15ffa783ea4210877886c59566a0d20f6b2bc09 ]
+
+It is invalid to call for_each_thermal_trip() on an unregistered thermal
+zone anyway, and as per thermal_zone_device_register_with_trips(), the
+trips[] table must be present if num_trips is greater than zero for the
+given thermal zone.
+
+Hence, the trips check in for_each_thermal_trip() is redundant and so it
+can be dropped.
+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Acked-by: Daniel Lezcano <daniel.lezcano@linaro.org>
+Stable-dep-of: e95fa7404716 ("thermal: gov_power_allocator: avoid inability to reset a cdev")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/thermal/thermal_trip.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/thermal/thermal_trip.c b/drivers/thermal/thermal_trip.c
+index 597ac4144e33..4b3a9e77c039 100644
+--- a/drivers/thermal/thermal_trip.c
++++ b/drivers/thermal/thermal_trip.c
+@@ -17,9 +17,6 @@ int for_each_thermal_trip(struct thermal_zone_device *tz,
+
+ lockdep_assert_held(&tz->lock);
+
+- if (!tz->trips)
+- return -ENODATA;
+-
+ for (i = 0; i < tz->num_trips; i++) {
+ ret = cb(&tz->trips[i], data);
+ if (ret)
+--
+2.43.0
+
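With the check gone, for_each_thermal_trip() simply iterates over the
num_trips entries of a registered zone. A usage sketch follows; the callback,
the counter, and the exact callback signature (inferred from the cb(&tz->trips[i],
data) call above) are assumptions for illustration, and tz->lock is taken to be
held as the lockdep assertion requires:

    static int count_trips_cb(struct thermal_trip *trip, void *data)
    {
            (*(int *)data)++;
            return 0;       /* a non-zero return would stop the iteration */
    }

    /* ... with tz->lock held ... */
    int n = 0;

    for_each_thermal_trip(tz, count_trips_cb, &n);
    /* n == tz->num_trips; a zone without trips now yields 0, not -ENODATA */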