--- /dev/null
+From 4fa944255be521b1bbd9780383f77206303a3a5c Mon Sep 17 00:00:00 2001
+From: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
+Date: Tue, 25 Nov 2025 10:48:39 +0100
+Subject: drm/amdgpu: add missing lock to amdgpu_ttm_access_memory_sdma
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
+
+commit 4fa944255be521b1bbd9780383f77206303a3a5c upstream.
+
+Users of ttm entities need to hold the gtt_window_lock before using them
+to guarantee proper ordering of jobs.
+
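+In sketch form, the rule is that any user of the GTT window entities
+must bracket job setup and submission with the window lock (this
+mirrors the hunk below, it is not a new API):
+
+    mutex_lock(&adev->mman.gtt_window_lock);
+    /* map the window, build the SDMA copy job */
+    fence = amdgpu_job_submit(job);
+    mutex_unlock(&adev->mman.gtt_window_lock);
+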
+Cc: stable@vger.kernel.org
+Fixes: cb5cc4f573e1 ("drm/amdgpu: improve debug VRAM access performance using sdma")
+Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -1513,6 +1513,7 @@ static int amdgpu_ttm_access_memory_sdma
+ if (r)
+ goto out;
+
++ mutex_lock(&adev->mman.gtt_window_lock);
+ amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
+ src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) +
+ src_mm.start;
+@@ -1527,6 +1528,7 @@ static int amdgpu_ttm_access_memory_sdma
+ WARN_ON(job->ibs[0].length_dw > num_dw);
+
+ fence = amdgpu_job_submit(job);
++ mutex_unlock(&adev->mman.gtt_window_lock);
+
+ if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
+ r = -ETIMEDOUT;
--- /dev/null
+From 3f2289b56cd98f5741056bdb6e521324eff07ce5 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 13 Nov 2025 15:55:19 -0500
+Subject: drm/amdgpu/gmc11: add amdgpu_vm_handle_fault() handling
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 3f2289b56cd98f5741056bdb6e521324eff07ce5 upstream.
+
+We need to call amdgpu_vm_handle_fault() on page faults
+on all gfx9 and newer parts to properly update the
+page tables, not just for recoverable page faults.
+
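+For reference, the IV decode added below uses two bits of src_data[1]
+(values taken from the hunk itself): bit 7 marks a retry fault and
+bit 5 a write fault, while the faulting address is rebuilt from
+src_data[0] and the low nibble of src_data[1]:
+
+    retry_fault = !!(entry->src_data[1] & 0x80);
+    write_fault = !!(entry->src_data[1] & 0x20);
+    addr  = (u64)entry->src_data[0] << 12;
+    addr |= ((u64)entry->src_data[1] & 0xf) << 44;
+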
+Cc: stable@vger.kernel.org
+Reviewed-by: Timur Kristóf <timur.kristof@gmail.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 27 +++++++++++++++++++++++++++
+ 1 file changed, 27 insertions(+)
+
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+@@ -103,12 +103,39 @@ static int gmc_v11_0_process_interrupt(s
+ uint32_t vmhub_index = entry->client_id == SOC21_IH_CLIENTID_VMC ?
+ AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0);
+ struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index];
++ bool retry_fault = !!(entry->src_data[1] & 0x80);
++ bool write_fault = !!(entry->src_data[1] & 0x20);
+ uint32_t status = 0;
+ u64 addr;
+
+ addr = (u64)entry->src_data[0] << 12;
+ addr |= ((u64)entry->src_data[1] & 0xf) << 44;
+
++ if (retry_fault) {
++ /* Returning 1 here also prevents sending the IV to the KFD */
++
++ /* Process it only if it's the first fault for this address */
++ if (entry->ih != &adev->irq.ih_soft &&
++ amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
++ entry->timestamp))
++ return 1;
++
++ /* Delegate it to a different ring if the hardware hasn't
++ * already done it.
++ */
++ if (entry->ih == &adev->irq.ih) {
++ amdgpu_irq_delegate(adev, entry, 8);
++ return 1;
++ }
++
++ /* Try to handle the recoverable page faults by filling page
++ * tables
++ */
++ if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr,
++ entry->timestamp, write_fault))
++ return 1;
++ }
++
+ if (!amdgpu_sriov_vf(adev)) {
+ /*
+ * Issue a dummy read to wait for the status register to
--- /dev/null
+From ff28ff98db6a8eeb469e02fb8bd1647b353232a9 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 13 Nov 2025 15:57:43 -0500
+Subject: drm/amdgpu/gmc12: add amdgpu_vm_handle_fault() handling
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit ff28ff98db6a8eeb469e02fb8bd1647b353232a9 upstream.
+
+We need to call amdgpu_vm_handle_fault() on page faults
+on all gfx9 and newer parts to properly update the
+page tables, not just for recoverable page faults.
+
+Cc: stable@vger.kernel.org
+Reviewed-by: Timur Kristóf <timur.kristof@gmail.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c | 27 +++++++++++++++++++++++++++
+ 1 file changed, 27 insertions(+)
+
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+@@ -91,6 +91,8 @@ static int gmc_v12_0_process_interrupt(s
+ struct amdgpu_iv_entry *entry)
+ {
+ struct amdgpu_vmhub *hub;
++ bool retry_fault = !!(entry->src_data[1] & 0x80);
++ bool write_fault = !!(entry->src_data[1] & 0x20);
+ uint32_t status = 0;
+ u64 addr;
+
+@@ -102,6 +104,31 @@ static int gmc_v12_0_process_interrupt(s
+ else
+ hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
+
++ if (retry_fault) {
++ /* Returning 1 here also prevents sending the IV to the KFD */
++
++ /* Process it only if it's the first fault for this address */
++ if (entry->ih != &adev->irq.ih_soft &&
++ amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
++ entry->timestamp))
++ return 1;
++
++ /* Delegate it to a different ring if the hardware hasn't
++ * already done it.
++ */
++ if (entry->ih == &adev->irq.ih) {
++ amdgpu_irq_delegate(adev, entry, 8);
++ return 1;
++ }
++
++ /* Try to handle the recoverable page faults by filling page
++ * tables
++ */
++ if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr,
++ entry->timestamp, write_fault))
++ return 1;
++ }
++
+ if (!amdgpu_sriov_vf(adev)) {
+ /*
+ * Issue a dummy read to wait for the status register to
--- /dev/null
+From cf326449637a566ba98fb82c47d46cd479608c88 Mon Sep 17 00:00:00 2001
+From: Jonathan Kim <jonathan.kim@amd.com>
+Date: Fri, 5 Dec 2025 14:41:08 -0500
+Subject: drm/amdkfd: bump minimum vgpr size for gfx1151
+
+From: Jonathan Kim <jonathan.kim@amd.com>
+
+commit cf326449637a566ba98fb82c47d46cd479608c88 upstream.
+
+GFX1151 has 1.5x the number of available physical VGPRs per SIMD.
+Bump total memory availability for acquire checks on queue creation.
+
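+(Sanity check: the baseline vgpr_size in this function is 0x40000, and
+1.5 * 0x40000 = 0x60000, which is exactly the bucket gfx1151 joins
+below.)
+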
+Signed-off-by: Jonathan Kim <jonathan.kim@amd.com>
+Reviewed-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit b42f3bf9536c9b710fd1d4deb7d1b0dc819dc72d)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_queue.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+@@ -408,6 +408,7 @@ static u32 kfd_get_vgpr_size_per_cu(u32
+ vgpr_size = 0x80000;
+ else if (gfxv == 110000 || /* GFX_VERSION_PLUM_BONITO */
+ gfxv == 110001 || /* GFX_VERSION_WHEAT_NAS */
++ gfxv == 110501 || /* GFX_VERSION_GFX1151 */
+ gfxv == 120000 || /* GFX_VERSION_GFX1200 */
+ gfxv == 120001) /* GFX_VERSION_GFX1201 */
+ vgpr_size = 0x60000;
--- /dev/null
+From 8fc2796dea6f1210e1a01573961d5836a7ce531e Mon Sep 17 00:00:00 2001
+From: Mario Limonciello <mario.limonciello@amd.com>
+Date: Fri, 5 Dec 2025 12:41:58 -0600
+Subject: drm/amdkfd: Export the cwsr_size and ctl_stack_size to userspace
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+commit 8fc2796dea6f1210e1a01573961d5836a7ce531e upstream.
+
+This is important for userspace to avoid hardcoding VGPR size.
+
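+A userspace consumer can then read the values instead of hardcoding
+them, e.g. (illustrative sketch; the topology path and node index are
+assumptions, and the properties file is "name value" per line):
+
+    FILE *f = fopen("/sys/class/kfd/kfd/topology/nodes/0/properties", "r");
+    char name[64];
+    unsigned long long val;
+
+    while (f && fscanf(f, "%63s %llu", name, &val) == 2) {
+        if (!strcmp(name, "cwsr_size") || !strcmp(name, "ctl_stack_size"))
+            printf("%s = %llu\n", name, val);
+    }
+    if (f)
+        fclose(f);
+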
+Reviewed-by: Kent Russell <kent.russell@amd.com>
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 71776e0965f9f730af19c5f548827f2a7c91f5a8)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+@@ -509,6 +509,10 @@ static ssize_t node_show(struct kobject
+ dev->node_props.num_sdma_queues_per_engine);
+ sysfs_show_32bit_prop(buffer, offs, "num_cp_queues",
+ dev->node_props.num_cp_queues);
++ sysfs_show_32bit_prop(buffer, offs, "cwsr_size",
++ dev->node_props.cwsr_size);
++ sysfs_show_32bit_prop(buffer, offs, "ctl_stack_size",
++ dev->node_props.ctl_stack_size);
+
+ if (dev->gpu) {
+ log_max_watch_addr =
--- /dev/null
+From b7851f8c66191cd23a0a08bd484465ad74bbbb7d Mon Sep 17 00:00:00 2001
+From: Jay Cornwall <jay.cornwall@amd.com>
+Date: Fri, 14 Nov 2025 14:32:42 -0600
+Subject: drm/amdkfd: Trap handler support for expert scheduling mode
+
+From: Jay Cornwall <jay.cornwall@amd.com>
+
+commit b7851f8c66191cd23a0a08bd484465ad74bbbb7d upstream.
+
+The trap may be entered with dependency checking disabled.
+Wait for dependency counters and save/restore scheduling mode.
+
+v2:
+
+Use ttmp1 instead of ttmp11. ttmp11 is not zero-initialized.
+While the trap handler does zero this field before use, a user-mode
+second-level trap handler could not rely on this being zero when
+using an older kernel mode driver.
+
+v3:
+
+Use ttmp11 primarily but copy to ttmp1 before jumping to the
+second level trap handler. ttmp1 is inspectable by a debugger.
+Unexpected bits in the unused space may regress existing software.
+
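+In C terms the new bookkeeping reduces to plain bit manipulation
+(constants from the assembly below; illustration only, the real code
+is the gfx12 trap handler):
+
+    /* on entry: save SCHED_MODE[1:0] into ttmp11[27:26] */
+    ttmp11 = (ttmp11 & ~0xC000000u) | ((sched_mode & 0x3) << 26);
+    /* before the second-level trap: mirror into ttmp1[27:26] */
+    ttmp1 = (ttmp1 & ~0xC000000u) | (ttmp11 & 0xC000000u);
+    /* on exit: restore from ttmp11 */
+    sched_mode = (ttmp11 >> 26) & 0x3;
+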
+Signed-off-by: Jay Cornwall <jay.cornwall@amd.com>
+Reviewed-by: Lancelot Six <lancelot.six@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 423888879412e94725ca2bdccd89414887d98e31)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 62 +++++++++--------
+ drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm | 37 ++++++++++
+ 2 files changed, 73 insertions(+), 26 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+@@ -3640,14 +3640,18 @@ static const uint32_t cwsr_trap_gfx9_4_3
+ };
+
+ static const uint32_t cwsr_trap_gfx12_hex[] = {
+- 0xbfa00001, 0xbfa002a2,
+- 0xb0804009, 0xb8f8f804,
++ 0xbfa00001, 0xbfa002b2,
++ 0xb0804009, 0xb8eef81a,
++ 0xbf880000, 0xb980081a,
++ 0x00000000, 0xb8f8f804,
++ 0x9177ff77, 0x0c000000,
++ 0x846e9a6e, 0x8c776e77,
+ 0x9178ff78, 0x00008c00,
+ 0xb8fbf811, 0x8b6eff78,
+ 0x00004000, 0xbfa10008,
+ 0x8b6eff7b, 0x00000080,
+ 0xbfa20018, 0x8b6ea07b,
+- 0xbfa20042, 0xbf830010,
++ 0xbfa2004a, 0xbf830010,
+ 0xb8fbf811, 0xbfa0fffb,
+ 0x8b6eff7b, 0x00000bd0,
+ 0xbfa20010, 0xb8eef812,
+@@ -3658,28 +3662,32 @@ static const uint32_t cwsr_trap_gfx12_he
+ 0xf0000000, 0xbfa20005,
+ 0x8b6fff6f, 0x00000200,
+ 0xbfa20002, 0x8b6ea07b,
+- 0xbfa2002c, 0xbefa4d82,
++ 0xbfa20034, 0xbefa4d82,
+ 0xbf8a0000, 0x84fa887a,
+ 0xbf0d8f7b, 0xbfa10002,
+ 0x8c7bff7b, 0xffff0000,
+- 0xf4601bbd, 0xf8000010,
+- 0xbf8a0000, 0x846e976e,
+- 0x9177ff77, 0x00800000,
+- 0x8c776e77, 0xf4603bbd,
+- 0xf8000000, 0xbf8a0000,
+- 0xf4603ebd, 0xf8000008,
+- 0xbf8a0000, 0x8bee6e6e,
+- 0xbfa10001, 0xbe80486e,
+- 0x8b6eff6d, 0xf0000000,
+- 0xbfa20009, 0xb8eef811,
+- 0x8b6eff6e, 0x00000080,
+- 0xbfa20007, 0x8c78ff78,
+- 0x00004000, 0x80ec886c,
+- 0x82ed806d, 0xbfa00002,
+- 0x806c846c, 0x826d806d,
+- 0x8b6dff6d, 0x0000ffff,
+- 0x8bfe7e7e, 0x8bea6a6a,
+- 0x85788978, 0xb9783244,
++ 0x8b6eff77, 0x0c000000,
++ 0x916dff6d, 0x0c000000,
++ 0x8c6d6e6d, 0xf4601bbd,
++ 0xf8000010, 0xbf8a0000,
++ 0x846e976e, 0x9177ff77,
++ 0x00800000, 0x8c776e77,
++ 0xf4603bbd, 0xf8000000,
++ 0xbf8a0000, 0xf4603ebd,
++ 0xf8000008, 0xbf8a0000,
++ 0x8bee6e6e, 0xbfa10001,
++ 0xbe80486e, 0x8b6eff6d,
++ 0xf0000000, 0xbfa20009,
++ 0xb8eef811, 0x8b6eff6e,
++ 0x00000080, 0xbfa20007,
++ 0x8c78ff78, 0x00004000,
++ 0x80ec886c, 0x82ed806d,
++ 0xbfa00002, 0x806c846c,
++ 0x826d806d, 0x8b6dff6d,
++ 0x0000ffff, 0x8bfe7e7e,
++ 0x8bea6a6a, 0x85788978,
++ 0x936eff77, 0x0002001a,
++ 0xb96ef81a, 0xb9783244,
+ 0xbe804a6c, 0xb8faf802,
+ 0xbf0d987a, 0xbfa10001,
+ 0xbfb00000, 0x8b6dff6d,
+@@ -3977,7 +3985,7 @@ static const uint32_t cwsr_trap_gfx12_he
+ 0x008ce800, 0x00000000,
+ 0x807d817d, 0x8070ff70,
+ 0x00000080, 0xbf0a7b7d,
+- 0xbfa2fff7, 0xbfa0016e,
++ 0xbfa2fff7, 0xbfa00171,
+ 0xbef4007e, 0x8b75ff7f,
+ 0x0000ffff, 0x8c75ff75,
+ 0x00040000, 0xbef60080,
+@@ -4159,10 +4167,12 @@ static const uint32_t cwsr_trap_gfx12_he
+ 0xf8000074, 0xbf8a0000,
+ 0x8b6dff6d, 0x0000ffff,
+ 0x8bfe7e7e, 0x8bea6a6a,
+- 0xb97af804, 0xbe804ec2,
+- 0xbf94fffe, 0xbe804a6c,
++ 0x936eff77, 0x0002001a,
++ 0xb96ef81a, 0xb97af804,
+ 0xbe804ec2, 0xbf94fffe,
+- 0xbfb10000, 0xbf9f0000,
++ 0xbe804a6c, 0xbe804ec2,
++ 0xbf94fffe, 0xbfb10000,
+ 0xbf9f0000, 0xbf9f0000,
+ 0xbf9f0000, 0xbf9f0000,
++ 0xbf9f0000, 0x00000000,
+ };
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
+@@ -78,9 +78,16 @@ var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_
+ var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SIZE = SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT - SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT
+ var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT
+ var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SIZE = 32 - SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT
++
++var SQ_WAVE_SCHED_MODE_DEP_MODE_SHIFT = 0
++var SQ_WAVE_SCHED_MODE_DEP_MODE_SIZE = 2
++
+ var BARRIER_STATE_SIGNAL_OFFSET = 16
+ var BARRIER_STATE_VALID_OFFSET = 0
+
++var TTMP11_SCHED_MODE_SHIFT = 26
++var TTMP11_SCHED_MODE_SIZE = 2
++var TTMP11_SCHED_MODE_MASK = 0xC000000
+ var TTMP11_DEBUG_TRAP_ENABLED_SHIFT = 23
+ var TTMP11_DEBUG_TRAP_ENABLED_MASK = 0x800000
+
+@@ -160,8 +167,19 @@ L_JUMP_TO_RESTORE:
+ s_branch L_RESTORE
+
+ L_SKIP_RESTORE:
++ // Assume most relaxed scheduling mode is set. Save and revert to normal mode.
++ s_getreg_b32 ttmp2, hwreg(HW_REG_WAVE_SCHED_MODE)
++ s_wait_alu 0
++ s_setreg_imm32_b32 hwreg(HW_REG_WAVE_SCHED_MODE, \
++ SQ_WAVE_SCHED_MODE_DEP_MODE_SHIFT, SQ_WAVE_SCHED_MODE_DEP_MODE_SIZE), 0
++
+ s_getreg_b32 s_save_state_priv, hwreg(HW_REG_WAVE_STATE_PRIV) //save STATUS since we will change SCC
+
++ // Save SCHED_MODE[1:0] into ttmp11[27:26].
++ s_andn2_b32 ttmp11, ttmp11, TTMP11_SCHED_MODE_MASK
++ s_lshl_b32 ttmp2, ttmp2, TTMP11_SCHED_MODE_SHIFT
++ s_or_b32 ttmp11, ttmp11, ttmp2
++
+ // Clear SPI_PRIO: do not save with elevated priority.
+ // Clear ECC_ERR: prevents SQC store and triggers FATAL_HALT if setreg'd.
+ s_andn2_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_ALWAYS_CLEAR_MASK
+@@ -238,6 +256,13 @@ L_FETCH_2ND_TRAP:
+ s_cbranch_scc0 L_NO_SIGN_EXTEND_TMA
+ s_or_b32 ttmp15, ttmp15, 0xFFFF0000
+ L_NO_SIGN_EXTEND_TMA:
++#if ASIC_FAMILY == CHIP_GFX12
++ // Move SCHED_MODE[1:0] from ttmp11 to unused bits in ttmp1[27:26] (return PC_HI).
++ // The second-level trap will restore from ttmp1 for backwards compatibility.
++ s_and_b32 ttmp2, ttmp11, TTMP11_SCHED_MODE_MASK
++ s_andn2_b32 ttmp1, ttmp1, TTMP11_SCHED_MODE_MASK
++ s_or_b32 ttmp1, ttmp1, ttmp2
++#endif
+
+ s_load_dword ttmp2, [ttmp14, ttmp15], 0x10 scope:SCOPE_SYS // debug trap enabled flag
+ s_wait_idle
+@@ -287,6 +312,10 @@ L_EXIT_TRAP:
+ // STATE_PRIV.BARRIER_COMPLETE may have changed since we read it.
+ // Only restore fields which the trap handler changes.
+ s_lshr_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_SCC_SHIFT
++
++ // Assume relaxed scheduling mode after this point.
++ restore_sched_mode(ttmp2)
++
+ s_setreg_b32 hwreg(HW_REG_WAVE_STATE_PRIV, SQ_WAVE_STATE_PRIV_SCC_SHIFT, \
+ SQ_WAVE_STATE_PRIV_POISON_ERR_SHIFT - SQ_WAVE_STATE_PRIV_SCC_SHIFT + 1), s_save_state_priv
+
+@@ -1043,6 +1072,9 @@ L_SKIP_BARRIER_RESTORE:
+ s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
+ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+
++ // Assume relaxed scheduling mode after this point.
++ restore_sched_mode(s_restore_tmp)
++
+ s_setreg_b32 hwreg(HW_REG_WAVE_STATE_PRIV), s_restore_state_priv // SCC is included, which is changed by previous salu
+
+ // Make barrier and LDS state visible to all waves in the group.
+@@ -1134,3 +1166,8 @@ function valu_sgpr_hazard
+ end
+ #endif
+ end
++
++function restore_sched_mode(s_tmp)
++ s_bfe_u32 s_tmp, ttmp11, (TTMP11_SCHED_MODE_SHIFT | (TTMP11_SCHED_MODE_SIZE << 0x10))
++ s_setreg_b32 hwreg(HW_REG_WAVE_SCHED_MODE), s_tmp
++end
--- /dev/null
+From c178e534fff1d5a74da80ea03b20e2b948a00113 Mon Sep 17 00:00:00 2001
+From: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
+Date: Mon, 6 Oct 2025 15:21:22 +0530
+Subject: drm/buddy: Optimize free block management with RB tree
+
+From: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
+
+commit c178e534fff1d5a74da80ea03b20e2b948a00113 upstream.
+
+Replace the freelist (O(n)) used for free block management with a
+red-black tree, providing more efficient O(log n) search, insert,
+and delete operations. This improves scalability and performance
+when managing large numbers of free blocks per order (e.g., hundreds
+or thousands).
+
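+The core of the change is the stock rb_add() sorted-insert pattern
+with an offset comparator, abridged from the diff below:
+
+    static bool rbtree_block_offset_less(struct rb_node *block,
+                                         const struct rb_node *node)
+    {
+        return drm_buddy_block_offset(rbtree_get_free_block(block)) <
+               drm_buddy_block_offset(rbtree_get_free_block(node));
+    }
+
+    rb_add(&block->rb, &mm->free_tree[drm_buddy_block_order(block)],
+           rbtree_block_offset_less);
+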
+In the VK-CTS memory stress subtest, the buddy manager merges
+fragmented memory and inserts freed blocks into the freelist. Since
+freelist insertion is O(n), this becomes a bottleneck as fragmentation
+increases. Benchmarking shows list_insert_sorted() consumes ~52.69% CPU
+with the freelist, compared to just 0.03% with the RB tree
+(rbtree_insert.isra.0), despite performing the same sorted insert.
+
+This also improves performance in heavily fragmented workloads,
+such as games or graphics tests that stress memory.
+
+As the buddy allocator evolves with new features such as clear-page
+tracking, the resulting fragmentation and complexity have grown.
+These RB-tree based design changes are introduced to address that
+growth and ensure the allocator continues to perform efficiently
+under fragmented conditions.
+
+The RB tree implementation with separate clear/dirty trees provides:
+- O(n log n) aggregate complexity for all operations instead of O(n^2)
+- Elimination of soft lockups and system instability
+- Improved code maintainability and clarity
+- Better scalability for large memory systems
+- Predictable performance under fragmentation
+
+v3(Matthew):
+ - Remove RB_EMPTY_NODE check in force_merge function.
+ - Rename rb for loop macros to have less generic names and move to
+ .c file.
+ - Make the rb node rb and link field as union.
+
+v4(Jani Nikula):
+ - The kernel-doc comment should be "/**"
+ - Move all the rbtree macros to rbtree.h and add parens to ensure
+ correct precedence.
+
+v5:
+ - Remove the inline in a .c file (Jani Nikula).
+
+v6(Peter Zijlstra):
+ - Add rb_add() function replacing the existing rbtree_insert() code.
+
+v7:
+ - A full walk iteration in rbtree is slower than the list (Peter Zijlstra).
+ - The existing rbtree_postorder_for_each_entry_safe macro should be used
+ in scenarios where traversal order is not a critical factor (Christian).
+
+v8(Matthew):
+ - Remove the rbtree_is_empty() check in this patch as well.
+
+Cc: stable@vger.kernel.org
+Fixes: a68c7eaa7a8f ("drm/amdgpu: Enable clear page functionality")
+Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
+Reviewed-by: Matthew Auld <matthew.auld@intel.com>
+Link: https://lore.kernel.org/r/20251006095124.1663-1-Arunpravin.PaneerSelvam@amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/drm_buddy.c | 195 ++++++++++++++++++++++++++------------------
+ include/drm/drm_buddy.h | 11 +-
+ 2 files changed, 126 insertions(+), 80 deletions(-)
+
+--- a/drivers/gpu/drm/drm_buddy.c
++++ b/drivers/gpu/drm/drm_buddy.c
+@@ -11,6 +11,8 @@
+
+ static struct kmem_cache *slab_blocks;
+
++#define rbtree_get_free_block(node) rb_entry((node), struct drm_buddy_block, rb)
++
+ static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
+ struct drm_buddy_block *parent,
+ unsigned int order,
+@@ -28,6 +30,8 @@ static struct drm_buddy_block *drm_block
+ block->header |= order;
+ block->parent = parent;
+
++ RB_CLEAR_NODE(&block->rb);
++
+ BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED);
+ return block;
+ }
+@@ -38,23 +42,49 @@ static void drm_block_free(struct drm_bu
+ kmem_cache_free(slab_blocks, block);
+ }
+
+-static void list_insert_sorted(struct drm_buddy *mm,
+- struct drm_buddy_block *block)
++static bool drm_buddy_block_offset_less(const struct drm_buddy_block *block,
++ const struct drm_buddy_block *node)
+ {
+- struct drm_buddy_block *node;
+- struct list_head *head;
++ return drm_buddy_block_offset(block) < drm_buddy_block_offset(node);
++}
+
+- head = &mm->free_list[drm_buddy_block_order(block)];
+- if (list_empty(head)) {
+- list_add(&block->link, head);
+- return;
+- }
++static bool rbtree_block_offset_less(struct rb_node *block,
++ const struct rb_node *node)
++{
++ return drm_buddy_block_offset_less(rbtree_get_free_block(block),
++ rbtree_get_free_block(node));
++}
+
+- list_for_each_entry(node, head, link)
+- if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node))
+- break;
++static void rbtree_insert(struct drm_buddy *mm,
++ struct drm_buddy_block *block)
++{
++ rb_add(&block->rb,
++ &mm->free_tree[drm_buddy_block_order(block)],
++ rbtree_block_offset_less);
++}
++
++static void rbtree_remove(struct drm_buddy *mm,
++ struct drm_buddy_block *block)
++{
++ struct rb_root *root;
++
++ root = &mm->free_tree[drm_buddy_block_order(block)];
++ rb_erase(&block->rb, root);
++
++ RB_CLEAR_NODE(&block->rb);
++}
++
++static struct drm_buddy_block *
++rbtree_last_entry(struct drm_buddy *mm, unsigned int order)
++{
++ struct rb_node *node = rb_last(&mm->free_tree[order]);
++
++ return node ? rb_entry(node, struct drm_buddy_block, rb) : NULL;
++}
+
+- __list_add(&block->link, node->link.prev, &node->link);
++static bool rbtree_is_empty(struct drm_buddy *mm, unsigned int order)
++{
++ return RB_EMPTY_ROOT(&mm->free_tree[order]);
+ }
+
+ static void clear_reset(struct drm_buddy_block *block)
+@@ -67,12 +97,13 @@ static void mark_cleared(struct drm_budd
+ block->header |= DRM_BUDDY_HEADER_CLEAR;
+ }
+
+-static void mark_allocated(struct drm_buddy_block *block)
++static void mark_allocated(struct drm_buddy *mm,
++ struct drm_buddy_block *block)
+ {
+ block->header &= ~DRM_BUDDY_HEADER_STATE;
+ block->header |= DRM_BUDDY_ALLOCATED;
+
+- list_del(&block->link);
++ rbtree_remove(mm, block);
+ }
+
+ static void mark_free(struct drm_buddy *mm,
+@@ -81,15 +112,16 @@ static void mark_free(struct drm_buddy *
+ block->header &= ~DRM_BUDDY_HEADER_STATE;
+ block->header |= DRM_BUDDY_FREE;
+
+- list_insert_sorted(mm, block);
++ rbtree_insert(mm, block);
+ }
+
+-static void mark_split(struct drm_buddy_block *block)
++static void mark_split(struct drm_buddy *mm,
++ struct drm_buddy_block *block)
+ {
+ block->header &= ~DRM_BUDDY_HEADER_STATE;
+ block->header |= DRM_BUDDY_SPLIT;
+
+- list_del(&block->link);
++ rbtree_remove(mm, block);
+ }
+
+ static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
+@@ -145,7 +177,7 @@ static unsigned int __drm_buddy_free(str
+ mark_cleared(parent);
+ }
+
+- list_del(&buddy->link);
++ rbtree_remove(mm, buddy);
+ if (force_merge && drm_buddy_block_is_clear(buddy))
+ mm->clear_avail -= drm_buddy_block_size(mm, buddy);
+
+@@ -176,13 +208,19 @@ static int __force_merge(struct drm_budd
+ return -EINVAL;
+
+ for (i = min_order - 1; i >= 0; i--) {
+- struct drm_buddy_block *block, *prev;
++ struct rb_root *root = &mm->free_tree[i];
++ struct rb_node *iter;
++
++ iter = rb_last(root);
+
+- list_for_each_entry_safe_reverse(block, prev, &mm->free_list[i], link) {
+- struct drm_buddy_block *buddy;
++ while (iter) {
++ struct drm_buddy_block *block, *buddy;
+ u64 block_start, block_end;
+
+- if (!block->parent)
++ block = rbtree_get_free_block(iter);
++ iter = rb_prev(iter);
++
++ if (!block || !block->parent)
+ continue;
+
+ block_start = drm_buddy_block_offset(block);
+@@ -198,15 +236,10 @@ static int __force_merge(struct drm_budd
+ WARN_ON(drm_buddy_block_is_clear(block) ==
+ drm_buddy_block_is_clear(buddy));
+
+- /*
+- * If the prev block is same as buddy, don't access the
+- * block in the next iteration as we would free the
+- * buddy block as part of the free function.
+- */
+- if (prev == buddy)
+- prev = list_prev_entry(prev, link);
++ if (iter == &buddy->rb)
++ iter = rb_prev(iter);
+
+- list_del(&block->link);
++ rbtree_remove(mm, block);
+ if (drm_buddy_block_is_clear(block))
+ mm->clear_avail -= drm_buddy_block_size(mm, block);
+
+@@ -234,7 +267,7 @@ static int __force_merge(struct drm_budd
+ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
+ {
+ unsigned int i;
+- u64 offset;
++ u64 offset = 0;
+
+ if (size < chunk_size)
+ return -EINVAL;
+@@ -255,14 +288,14 @@ int drm_buddy_init(struct drm_buddy *mm,
+
+ BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER);
+
+- mm->free_list = kmalloc_array(mm->max_order + 1,
+- sizeof(struct list_head),
++ mm->free_tree = kmalloc_array(mm->max_order + 1,
++ sizeof(struct rb_root),
+ GFP_KERNEL);
+- if (!mm->free_list)
++ if (!mm->free_tree)
+ return -ENOMEM;
+
+ for (i = 0; i <= mm->max_order; ++i)
+- INIT_LIST_HEAD(&mm->free_list[i]);
++ mm->free_tree[i] = RB_ROOT;
+
+ mm->n_roots = hweight64(size);
+
+@@ -270,9 +303,8 @@ int drm_buddy_init(struct drm_buddy *mm,
+ sizeof(struct drm_buddy_block *),
+ GFP_KERNEL);
+ if (!mm->roots)
+- goto out_free_list;
++ goto out_free_tree;
+
+- offset = 0;
+ i = 0;
+
+ /*
+@@ -309,8 +341,8 @@ out_free_roots:
+ while (i--)
+ drm_block_free(mm, mm->roots[i]);
+ kfree(mm->roots);
+-out_free_list:
+- kfree(mm->free_list);
++out_free_tree:
++ kfree(mm->free_tree);
+ return -ENOMEM;
+ }
+ EXPORT_SYMBOL(drm_buddy_init);
+@@ -320,7 +352,7 @@ EXPORT_SYMBOL(drm_buddy_init);
+ *
+ * @mm: DRM buddy manager to free
+ *
+- * Cleanup memory manager resources and the freelist
++ * Cleanup memory manager resources and the freetree
+ */
+ void drm_buddy_fini(struct drm_buddy *mm)
+ {
+@@ -345,7 +377,7 @@ void drm_buddy_fini(struct drm_buddy *mm
+ WARN_ON(mm->avail != mm->size);
+
+ kfree(mm->roots);
+- kfree(mm->free_list);
++ kfree(mm->free_tree);
+ }
+ EXPORT_SYMBOL(drm_buddy_fini);
+
+@@ -378,7 +410,7 @@ static int split_block(struct drm_buddy
+ clear_reset(block);
+ }
+
+- mark_split(block);
++ mark_split(mm, block);
+
+ return 0;
+ }
+@@ -407,7 +439,7 @@ EXPORT_SYMBOL(drm_get_buddy);
+ * @is_clear: blocks clear state
+ *
+ * Reset the clear state based on @is_clear value for each block
+- * in the freelist.
++ * in the freetree.
+ */
+ void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear)
+ {
+@@ -426,9 +458,9 @@ void drm_buddy_reset_clear(struct drm_bu
+ }
+
+ for (i = 0; i <= mm->max_order; ++i) {
+- struct drm_buddy_block *block;
++ struct drm_buddy_block *block, *tmp;
+
+- list_for_each_entry_reverse(block, &mm->free_list[i], link) {
++ rbtree_postorder_for_each_entry_safe(block, tmp, &mm->free_tree[i], rb) {
+ if (is_clear != drm_buddy_block_is_clear(block)) {
+ if (is_clear) {
+ mark_cleared(block);
+@@ -634,14 +666,18 @@ get_maxblock(struct drm_buddy *mm, unsig
+ unsigned int i;
+
+ for (i = order; i <= mm->max_order; ++i) {
++ struct rb_node *iter = rb_last(&mm->free_tree[i]);
+ struct drm_buddy_block *tmp_block;
+
+- list_for_each_entry_reverse(tmp_block, &mm->free_list[i], link) {
+- if (block_incompatible(tmp_block, flags))
+- continue;
++ while (iter) {
++ tmp_block = rbtree_get_free_block(iter);
+
+- block = tmp_block;
+- break;
++ if (!block_incompatible(tmp_block, flags)) {
++ block = tmp_block;
++ break;
++ }
++
++ iter = rb_prev(iter);
+ }
+
+ if (!block)
+@@ -662,7 +698,7 @@ get_maxblock(struct drm_buddy *mm, unsig
+ }
+
+ static struct drm_buddy_block *
+-alloc_from_freelist(struct drm_buddy *mm,
++alloc_from_freetree(struct drm_buddy *mm,
+ unsigned int order,
+ unsigned long flags)
+ {
+@@ -677,14 +713,18 @@ alloc_from_freelist(struct drm_buddy *mm
+ tmp = drm_buddy_block_order(block);
+ } else {
+ for (tmp = order; tmp <= mm->max_order; ++tmp) {
++ struct rb_node *iter = rb_last(&mm->free_tree[tmp]);
+ struct drm_buddy_block *tmp_block;
+
+- list_for_each_entry_reverse(tmp_block, &mm->free_list[tmp], link) {
+- if (block_incompatible(tmp_block, flags))
+- continue;
++ while (iter) {
++ tmp_block = rbtree_get_free_block(iter);
+
+- block = tmp_block;
+- break;
++ if (!block_incompatible(tmp_block, flags)) {
++ block = tmp_block;
++ break;
++ }
++
++ iter = rb_prev(iter);
+ }
+
+ if (block)
+@@ -695,13 +735,9 @@ alloc_from_freelist(struct drm_buddy *mm
+ if (!block) {
+ /* Fallback method */
+ for (tmp = order; tmp <= mm->max_order; ++tmp) {
+- if (!list_empty(&mm->free_list[tmp])) {
+- block = list_last_entry(&mm->free_list[tmp],
+- struct drm_buddy_block,
+- link);
+- if (block)
+- break;
+- }
++ block = rbtree_last_entry(mm, tmp);
++ if (block)
++ break;
+ }
+
+ if (!block)
+@@ -766,7 +802,7 @@ static int __alloc_range(struct drm_budd
+
+ if (contains(start, end, block_start, block_end)) {
+ if (drm_buddy_block_is_free(block)) {
+- mark_allocated(block);
++ mark_allocated(mm, block);
+ total_allocated += drm_buddy_block_size(mm, block);
+ mm->avail -= drm_buddy_block_size(mm, block);
+ if (drm_buddy_block_is_clear(block))
+@@ -844,8 +880,8 @@ static int __alloc_contig_try_harder(str
+ {
+ u64 rhs_offset, lhs_offset, lhs_size, filled;
+ struct drm_buddy_block *block;
+- struct list_head *list;
+ LIST_HEAD(blocks_lhs);
++ struct rb_node *iter;
+ unsigned long pages;
+ unsigned int order;
+ u64 modify_size;
+@@ -857,11 +893,14 @@ static int __alloc_contig_try_harder(str
+ if (order == 0)
+ return -ENOSPC;
+
+- list = &mm->free_list[order];
+- if (list_empty(list))
++ if (rbtree_is_empty(mm, order))
+ return -ENOSPC;
+
+- list_for_each_entry_reverse(block, list, link) {
++ iter = rb_last(&mm->free_tree[order]);
++
++ while (iter) {
++ block = rbtree_get_free_block(iter);
++
+ /* Allocate blocks traversing RHS */
+ rhs_offset = drm_buddy_block_offset(block);
+ err = __drm_buddy_alloc_range(mm, rhs_offset, size,
+@@ -886,6 +925,8 @@ static int __alloc_contig_try_harder(str
+ }
+ /* Free blocks for the next iteration */
+ drm_buddy_free_list_internal(mm, blocks);
++
++ iter = rb_prev(iter);
+ }
+
+ return -ENOSPC;
+@@ -971,7 +1012,7 @@ int drm_buddy_block_trim(struct drm_budd
+ list_add(&block->tmp_link, &dfs);
+ err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL);
+ if (err) {
+- mark_allocated(block);
++ mark_allocated(mm, block);
+ mm->avail -= drm_buddy_block_size(mm, block);
+ if (drm_buddy_block_is_clear(block))
+ mm->clear_avail -= drm_buddy_block_size(mm, block);
+@@ -994,8 +1035,8 @@ __drm_buddy_alloc_blocks(struct drm_budd
+ return __drm_buddy_alloc_range_bias(mm, start, end,
+ order, flags);
+ else
+- /* Allocate from freelist */
+- return alloc_from_freelist(mm, order, flags);
++ /* Allocate from freetree */
++ return alloc_from_freetree(mm, order, flags);
+ }
+
+ /**
+@@ -1012,8 +1053,8 @@ __drm_buddy_alloc_blocks(struct drm_budd
+ * alloc_range_bias() called on range limitations, which traverses
+ * the tree and returns the desired block.
+ *
+- * alloc_from_freelist() called when *no* range restrictions
+- * are enforced, which picks the block from the freelist.
++ * alloc_from_freetree() called when *no* range restrictions
++ * are enforced, which picks the block from the freetree.
+ *
+ * Returns:
+ * 0 on success, error code on failure.
+@@ -1115,7 +1156,7 @@ int drm_buddy_alloc_blocks(struct drm_bu
+ }
+ } while (1);
+
+- mark_allocated(block);
++ mark_allocated(mm, block);
+ mm->avail -= drm_buddy_block_size(mm, block);
+ if (drm_buddy_block_is_clear(block))
+ mm->clear_avail -= drm_buddy_block_size(mm, block);
+@@ -1196,10 +1237,10 @@ void drm_buddy_print(struct drm_buddy *m
+ mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20, mm->clear_avail >> 20);
+
+ for (order = mm->max_order; order >= 0; order--) {
+- struct drm_buddy_block *block;
++ struct drm_buddy_block *block, *tmp;
+ u64 count = 0, free;
+
+- list_for_each_entry(block, &mm->free_list[order], link) {
++ rbtree_postorder_for_each_entry_safe(block, tmp, &mm->free_tree[order], rb) {
+ BUG_ON(!drm_buddy_block_is_free(block));
+ count++;
+ }
+--- a/include/drm/drm_buddy.h
++++ b/include/drm/drm_buddy.h
+@@ -10,6 +10,7 @@
+ #include <linux/list.h>
+ #include <linux/slab.h>
+ #include <linux/sched.h>
++#include <linux/rbtree.h>
+
+ #include <drm/drm_print.h>
+
+@@ -53,7 +54,11 @@ struct drm_buddy_block {
+ * a list, if so desired. As soon as the block is freed with
+ * drm_buddy_free* ownership is given back to the mm.
+ */
+- struct list_head link;
++ union {
++ struct rb_node rb;
++ struct list_head link;
++ };
++
+ struct list_head tmp_link;
+ };
+
+@@ -68,7 +73,7 @@ struct drm_buddy_block {
+ */
+ struct drm_buddy {
+ /* Maintain a free list for each order. */
+- struct list_head *free_list;
++ struct rb_root *free_tree;
+
+ /*
+ * Maintain explicit binary tree(s) to track the allocation of the
+@@ -94,7 +99,7 @@ struct drm_buddy {
+ };
+
+ static inline u64
+-drm_buddy_block_offset(struct drm_buddy_block *block)
++drm_buddy_block_offset(const struct drm_buddy_block *block)
+ {
+ return block->header & DRM_BUDDY_HEADER_OFFSET;
+ }
--- /dev/null
+From d4cd665c98c144dd6ad5d66d30396e13d23118c9 Mon Sep 17 00:00:00 2001
+From: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
+Date: Mon, 6 Oct 2025 15:21:23 +0530
+Subject: drm/buddy: Separate clear and dirty free block trees
+
+From: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
+
+commit d4cd665c98c144dd6ad5d66d30396e13d23118c9 upstream.
+
+Maintain two separate RB trees per order - one for clear (zeroed) blocks
+and another for dirty (uncleared) blocks. This separation improves
+code clarity and makes it more obvious which tree is being searched
+during allocation. It also improves scalability and efficiency when
+searching for a specific type of block, avoiding unnecessary checks
+and making the allocator more predictable under fragmentation.
+
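+Allocation then picks a tree straight from the request flags and only
+falls back to the other one when it comes up empty (shape of the logic
+in the diff below):
+
+    tree = (flags & DRM_BUDDY_CLEAR_ALLOCATION) ?
+            DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE;
+    block = rbtree_last_free_block(&mm->free_trees[tree][order]);
+    if (!block) {
+        /* try the other tree before giving up */
+    }
+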
+The changes have been validated using the existing drm_buddy_test
+KUnit test cases, along with selected graphics workloads,
+to ensure correctness and avoid regressions.
+
+v2: Missed adding the suggested-by tag. Added it in v2.
+
+v3(Matthew):
+ - Remove the double underscores from the internal functions.
+ - Rename the internal functions to have less generic names.
+ - Fix the error handling code.
+ - Pass tree argument for the tree macro.
+ - Use the existing dirty/free bit instead of new tree field.
+ - Make free_trees[] instead of clear_tree and dirty_tree for
+ more cleaner approach.
+
+v4:
+ - A bug was reported by Intel CI and it is fixed by
+ Matthew Auld.
+ - Replace the get_root function with
+ &mm->free_trees[tree][order] (Matthew)
+ - Remove the unnecessary rbtree_is_empty() check (Matthew)
+ - Remove the unnecessary get_tree_for_flags() function.
+ - Rename get_tree_for_block() name with get_block_tree() for more
+ clarity.
+
+v5(Jani Nikula):
+ - Don't use static inline in .c files.
+ - enum free_tree and enumerator names are quite generic for a header
+ and usage and the whole enum should be an implementation detail.
+
+v6:
+ - Rewrite the __force_merge() function using the rb_last() and rb_prev().
+
+v7(Matthew):
+ - Replace the open-coded tree iteration for loops with the
+ for_each_free_tree() macro throughout the code.
+ - Fixed out_free_roots to prevent double decrement of i,
+ addressing potential crash.
+ - Replaced enum drm_buddy_free_tree with unsigned int
+ in for_each_free_tree loops.
+
+Cc: stable@vger.kernel.org
+Fixes: a68c7eaa7a8f ("drm/amdgpu: Enable clear page functionality")
+Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
+Suggested-by: Matthew Auld <matthew.auld@intel.com>
+Reviewed-by: Matthew Auld <matthew.auld@intel.com>
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4260
+Link: https://lore.kernel.org/r/20251006095124.1663-2-Arunpravin.PaneerSelvam@amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/drm_buddy.c | 329 ++++++++++++++++++++++++--------------------
+ include/drm/drm_buddy.h | 2
+ 2 files changed, 186 insertions(+), 145 deletions(-)
+
+--- a/drivers/gpu/drm/drm_buddy.c
++++ b/drivers/gpu/drm/drm_buddy.c
+@@ -9,9 +9,16 @@
+
+ #include <drm/drm_buddy.h>
+
++enum drm_buddy_free_tree {
++ DRM_BUDDY_CLEAR_TREE = 0,
++ DRM_BUDDY_DIRTY_TREE,
++ DRM_BUDDY_MAX_FREE_TREES,
++};
++
+ static struct kmem_cache *slab_blocks;
+
+-#define rbtree_get_free_block(node) rb_entry((node), struct drm_buddy_block, rb)
++#define for_each_free_tree(tree) \
++ for ((tree) = 0; (tree) < DRM_BUDDY_MAX_FREE_TREES; (tree)++)
+
+ static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
+ struct drm_buddy_block *parent,
+@@ -42,6 +49,30 @@ static void drm_block_free(struct drm_bu
+ kmem_cache_free(slab_blocks, block);
+ }
+
++static enum drm_buddy_free_tree
++get_block_tree(struct drm_buddy_block *block)
++{
++ return drm_buddy_block_is_clear(block) ?
++ DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE;
++}
++
++static struct drm_buddy_block *
++rbtree_get_free_block(const struct rb_node *node)
++{
++ return node ? rb_entry(node, struct drm_buddy_block, rb) : NULL;
++}
++
++static struct drm_buddy_block *
++rbtree_last_free_block(struct rb_root *root)
++{
++ return rbtree_get_free_block(rb_last(root));
++}
++
++static bool rbtree_is_empty(struct rb_root *root)
++{
++ return RB_EMPTY_ROOT(root);
++}
++
+ static bool drm_buddy_block_offset_less(const struct drm_buddy_block *block,
+ const struct drm_buddy_block *node)
+ {
+@@ -56,37 +87,28 @@ static bool rbtree_block_offset_less(str
+ }
+
+ static void rbtree_insert(struct drm_buddy *mm,
+- struct drm_buddy_block *block)
++ struct drm_buddy_block *block,
++ enum drm_buddy_free_tree tree)
+ {
+ rb_add(&block->rb,
+- &mm->free_tree[drm_buddy_block_order(block)],
++ &mm->free_trees[tree][drm_buddy_block_order(block)],
+ rbtree_block_offset_less);
+ }
+
+ static void rbtree_remove(struct drm_buddy *mm,
+ struct drm_buddy_block *block)
+ {
++ unsigned int order = drm_buddy_block_order(block);
++ enum drm_buddy_free_tree tree;
+ struct rb_root *root;
+
+- root = &mm->free_tree[drm_buddy_block_order(block)];
+- rb_erase(&block->rb, root);
++ tree = get_block_tree(block);
++ root = &mm->free_trees[tree][order];
+
++ rb_erase(&block->rb, root);
+ RB_CLEAR_NODE(&block->rb);
+ }
+
+-static struct drm_buddy_block *
+-rbtree_last_entry(struct drm_buddy *mm, unsigned int order)
+-{
+- struct rb_node *node = rb_last(&mm->free_tree[order]);
+-
+- return node ? rb_entry(node, struct drm_buddy_block, rb) : NULL;
+-}
+-
+-static bool rbtree_is_empty(struct drm_buddy *mm, unsigned int order)
+-{
+- return RB_EMPTY_ROOT(&mm->free_tree[order]);
+-}
+-
+ static void clear_reset(struct drm_buddy_block *block)
+ {
+ block->header &= ~DRM_BUDDY_HEADER_CLEAR;
+@@ -109,10 +131,13 @@ static void mark_allocated(struct drm_bu
+ static void mark_free(struct drm_buddy *mm,
+ struct drm_buddy_block *block)
+ {
++ enum drm_buddy_free_tree tree;
++
+ block->header &= ~DRM_BUDDY_HEADER_STATE;
+ block->header |= DRM_BUDDY_FREE;
+
+- rbtree_insert(mm, block);
++ tree = get_block_tree(block);
++ rbtree_insert(mm, block, tree);
+ }
+
+ static void mark_split(struct drm_buddy *mm,
+@@ -198,7 +223,7 @@ static int __force_merge(struct drm_budd
+ u64 end,
+ unsigned int min_order)
+ {
+- unsigned int order;
++ unsigned int tree, order;
+ int i;
+
+ if (!min_order)
+@@ -207,45 +232,48 @@ static int __force_merge(struct drm_budd
+ if (min_order > mm->max_order)
+ return -EINVAL;
+
+- for (i = min_order - 1; i >= 0; i--) {
+- struct rb_root *root = &mm->free_tree[i];
+- struct rb_node *iter;
++ for_each_free_tree(tree) {
++ for (i = min_order - 1; i >= 0; i--) {
++ struct rb_node *iter = rb_last(&mm->free_trees[tree][i]);
+
+- iter = rb_last(root);
+-
+- while (iter) {
+- struct drm_buddy_block *block, *buddy;
+- u64 block_start, block_end;
++ while (iter) {
++ struct drm_buddy_block *block, *buddy;
++ u64 block_start, block_end;
+
+- block = rbtree_get_free_block(iter);
+- iter = rb_prev(iter);
++ block = rbtree_get_free_block(iter);
++ iter = rb_prev(iter);
+
+- if (!block || !block->parent)
+- continue;
++ if (!block || !block->parent)
++ continue;
+
+- block_start = drm_buddy_block_offset(block);
+- block_end = block_start + drm_buddy_block_size(mm, block) - 1;
++ block_start = drm_buddy_block_offset(block);
++ block_end = block_start + drm_buddy_block_size(mm, block) - 1;
+
+- if (!contains(start, end, block_start, block_end))
+- continue;
++ if (!contains(start, end, block_start, block_end))
++ continue;
+
+- buddy = __get_buddy(block);
+- if (!drm_buddy_block_is_free(buddy))
+- continue;
++ buddy = __get_buddy(block);
++ if (!drm_buddy_block_is_free(buddy))
++ continue;
+
+- WARN_ON(drm_buddy_block_is_clear(block) ==
+- drm_buddy_block_is_clear(buddy));
++ WARN_ON(drm_buddy_block_is_clear(block) ==
++ drm_buddy_block_is_clear(buddy));
+
+- if (iter == &buddy->rb)
+- iter = rb_prev(iter);
++ /*
++ * Advance to the next node when the current node is the buddy,
++ * as freeing the block will also remove its buddy from the tree.
++ */
++ if (iter == &buddy->rb)
++ iter = rb_prev(iter);
+
+- rbtree_remove(mm, block);
+- if (drm_buddy_block_is_clear(block))
+- mm->clear_avail -= drm_buddy_block_size(mm, block);
++ rbtree_remove(mm, block);
++ if (drm_buddy_block_is_clear(block))
++ mm->clear_avail -= drm_buddy_block_size(mm, block);
+
+- order = __drm_buddy_free(mm, block, true);
+- if (order >= min_order)
+- return 0;
++ order = __drm_buddy_free(mm, block, true);
++ if (order >= min_order)
++ return 0;
++ }
+ }
+ }
+
+@@ -266,7 +294,7 @@ static int __force_merge(struct drm_budd
+ */
+ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
+ {
+- unsigned int i;
++ unsigned int i, j, root_count = 0;
+ u64 offset = 0;
+
+ if (size < chunk_size)
+@@ -288,14 +316,22 @@ int drm_buddy_init(struct drm_buddy *mm,
+
+ BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER);
+
+- mm->free_tree = kmalloc_array(mm->max_order + 1,
+- sizeof(struct rb_root),
+- GFP_KERNEL);
+- if (!mm->free_tree)
++ mm->free_trees = kmalloc_array(DRM_BUDDY_MAX_FREE_TREES,
++ sizeof(*mm->free_trees),
++ GFP_KERNEL);
++ if (!mm->free_trees)
+ return -ENOMEM;
+
+- for (i = 0; i <= mm->max_order; ++i)
+- mm->free_tree[i] = RB_ROOT;
++ for_each_free_tree(i) {
++ mm->free_trees[i] = kmalloc_array(mm->max_order + 1,
++ sizeof(struct rb_root),
++ GFP_KERNEL);
++ if (!mm->free_trees[i])
++ goto out_free_tree;
++
++ for (j = 0; j <= mm->max_order; ++j)
++ mm->free_trees[i][j] = RB_ROOT;
++ }
+
+ mm->n_roots = hweight64(size);
+
+@@ -305,8 +341,6 @@ int drm_buddy_init(struct drm_buddy *mm,
+ if (!mm->roots)
+ goto out_free_tree;
+
+- i = 0;
+-
+ /*
+ * Split into power-of-two blocks, in case we are given a size that is
+ * not itself a power-of-two.
+@@ -325,24 +359,26 @@ int drm_buddy_init(struct drm_buddy *mm,
+
+ mark_free(mm, root);
+
+- BUG_ON(i > mm->max_order);
++ BUG_ON(root_count > mm->max_order);
+ BUG_ON(drm_buddy_block_size(mm, root) < chunk_size);
+
+- mm->roots[i] = root;
++ mm->roots[root_count] = root;
+
+ offset += root_size;
+ size -= root_size;
+- i++;
++ root_count++;
+ } while (size);
+
+ return 0;
+
+ out_free_roots:
+- while (i--)
+- drm_block_free(mm, mm->roots[i]);
++ while (root_count--)
++ drm_block_free(mm, mm->roots[root_count]);
+ kfree(mm->roots);
+ out_free_tree:
+- kfree(mm->free_tree);
++ while (i--)
++ kfree(mm->free_trees[i]);
++ kfree(mm->free_trees);
+ return -ENOMEM;
+ }
+ EXPORT_SYMBOL(drm_buddy_init);
+@@ -376,8 +412,9 @@ void drm_buddy_fini(struct drm_buddy *mm
+
+ WARN_ON(mm->avail != mm->size);
+
++ for_each_free_tree(i)
++ kfree(mm->free_trees[i]);
+ kfree(mm->roots);
+- kfree(mm->free_tree);
+ }
+ EXPORT_SYMBOL(drm_buddy_fini);
+
+@@ -401,8 +438,7 @@ static int split_block(struct drm_buddy
+ return -ENOMEM;
+ }
+
+- mark_free(mm, block->left);
+- mark_free(mm, block->right);
++ mark_split(mm, block);
+
+ if (drm_buddy_block_is_clear(block)) {
+ mark_cleared(block->left);
+@@ -410,7 +446,8 @@ static int split_block(struct drm_buddy
+ clear_reset(block);
+ }
+
+- mark_split(mm, block);
++ mark_free(mm, block->left);
++ mark_free(mm, block->right);
+
+ return 0;
+ }
+@@ -443,6 +480,7 @@ EXPORT_SYMBOL(drm_get_buddy);
+ */
+ void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear)
+ {
++ enum drm_buddy_free_tree src_tree, dst_tree;
+ u64 root_size, size, start;
+ unsigned int order;
+ int i;
+@@ -457,19 +495,24 @@ void drm_buddy_reset_clear(struct drm_bu
+ size -= root_size;
+ }
+
++ src_tree = is_clear ? DRM_BUDDY_DIRTY_TREE : DRM_BUDDY_CLEAR_TREE;
++ dst_tree = is_clear ? DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE;
++
+ for (i = 0; i <= mm->max_order; ++i) {
++ struct rb_root *root = &mm->free_trees[src_tree][i];
+ struct drm_buddy_block *block, *tmp;
+
+- rbtree_postorder_for_each_entry_safe(block, tmp, &mm->free_tree[i], rb) {
+- if (is_clear != drm_buddy_block_is_clear(block)) {
+- if (is_clear) {
+- mark_cleared(block);
+- mm->clear_avail += drm_buddy_block_size(mm, block);
+- } else {
+- clear_reset(block);
+- mm->clear_avail -= drm_buddy_block_size(mm, block);
+- }
++ rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) {
++ rbtree_remove(mm, block);
++ if (is_clear) {
++ mark_cleared(block);
++ mm->clear_avail += drm_buddy_block_size(mm, block);
++ } else {
++ clear_reset(block);
++ mm->clear_avail -= drm_buddy_block_size(mm, block);
+ }
++
++ rbtree_insert(mm, block, dst_tree);
+ }
+ }
+ }
+@@ -659,27 +702,17 @@ __drm_buddy_alloc_range_bias(struct drm_
+ }
+
+ static struct drm_buddy_block *
+-get_maxblock(struct drm_buddy *mm, unsigned int order,
+- unsigned long flags)
++get_maxblock(struct drm_buddy *mm,
++ unsigned int order,
++ enum drm_buddy_free_tree tree)
+ {
+ struct drm_buddy_block *max_block = NULL, *block = NULL;
++ struct rb_root *root;
+ unsigned int i;
+
+ for (i = order; i <= mm->max_order; ++i) {
+- struct rb_node *iter = rb_last(&mm->free_tree[i]);
+- struct drm_buddy_block *tmp_block;
+-
+- while (iter) {
+- tmp_block = rbtree_get_free_block(iter);
+-
+- if (!block_incompatible(tmp_block, flags)) {
+- block = tmp_block;
+- break;
+- }
+-
+- iter = rb_prev(iter);
+- }
+-
++ root = &mm->free_trees[tree][i];
++ block = rbtree_last_free_block(root);
+ if (!block)
+ continue;
+
+@@ -703,39 +736,37 @@ alloc_from_freetree(struct drm_buddy *mm
+ unsigned long flags)
+ {
+ struct drm_buddy_block *block = NULL;
++ struct rb_root *root;
++ enum drm_buddy_free_tree tree;
+ unsigned int tmp;
+ int err;
+
++ tree = (flags & DRM_BUDDY_CLEAR_ALLOCATION) ?
++ DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE;
++
+ if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
+- block = get_maxblock(mm, order, flags);
++ block = get_maxblock(mm, order, tree);
+ if (block)
+ /* Store the obtained block order */
+ tmp = drm_buddy_block_order(block);
+ } else {
+ for (tmp = order; tmp <= mm->max_order; ++tmp) {
+- struct rb_node *iter = rb_last(&mm->free_tree[tmp]);
+- struct drm_buddy_block *tmp_block;
+-
+- while (iter) {
+- tmp_block = rbtree_get_free_block(iter);
+-
+- if (!block_incompatible(tmp_block, flags)) {
+- block = tmp_block;
+- break;
+- }
+-
+- iter = rb_prev(iter);
+- }
+-
++ /* Get RB tree root for this order and tree */
++ root = &mm->free_trees[tree][tmp];
++ block = rbtree_last_free_block(root);
+ if (block)
+ break;
+ }
+ }
+
+ if (!block) {
+- /* Fallback method */
++ /* Try allocating from the other tree */
++ tree = (tree == DRM_BUDDY_CLEAR_TREE) ?
++ DRM_BUDDY_DIRTY_TREE : DRM_BUDDY_CLEAR_TREE;
++
+ for (tmp = order; tmp <= mm->max_order; ++tmp) {
+- block = rbtree_last_entry(mm, tmp);
++ root = &mm->free_trees[tree][tmp];
++ block = rbtree_last_free_block(root);
+ if (block)
+ break;
+ }
+@@ -880,10 +911,9 @@ static int __alloc_contig_try_harder(str
+ {
+ u64 rhs_offset, lhs_offset, lhs_size, filled;
+ struct drm_buddy_block *block;
++ unsigned int tree, order;
+ LIST_HEAD(blocks_lhs);
+- struct rb_node *iter;
+ unsigned long pages;
+- unsigned int order;
+ u64 modify_size;
+ int err;
+
+@@ -893,40 +923,45 @@ static int __alloc_contig_try_harder(str
+ if (order == 0)
+ return -ENOSPC;
+
+- if (rbtree_is_empty(mm, order))
+- return -ENOSPC;
++ for_each_free_tree(tree) {
++ struct rb_root *root;
++ struct rb_node *iter;
+
+- iter = rb_last(&mm->free_tree[order]);
++ root = &mm->free_trees[tree][order];
++ if (rbtree_is_empty(root))
++ continue;
+
+- while (iter) {
+- block = rbtree_get_free_block(iter);
++ iter = rb_last(root);
++ while (iter) {
++ block = rbtree_get_free_block(iter);
+
+- /* Allocate blocks traversing RHS */
+- rhs_offset = drm_buddy_block_offset(block);
+- err = __drm_buddy_alloc_range(mm, rhs_offset, size,
+- &filled, blocks);
+- if (!err || err != -ENOSPC)
+- return err;
+-
+- lhs_size = max((size - filled), min_block_size);
+- if (!IS_ALIGNED(lhs_size, min_block_size))
+- lhs_size = round_up(lhs_size, min_block_size);
+-
+- /* Allocate blocks traversing LHS */
+- lhs_offset = drm_buddy_block_offset(block) - lhs_size;
+- err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size,
+- NULL, &blocks_lhs);
+- if (!err) {
+- list_splice(&blocks_lhs, blocks);
+- return 0;
+- } else if (err != -ENOSPC) {
++ /* Allocate blocks traversing RHS */
++ rhs_offset = drm_buddy_block_offset(block);
++ err = __drm_buddy_alloc_range(mm, rhs_offset, size,
++ &filled, blocks);
++ if (!err || err != -ENOSPC)
++ return err;
++
++ lhs_size = max((size - filled), min_block_size);
++ if (!IS_ALIGNED(lhs_size, min_block_size))
++ lhs_size = round_up(lhs_size, min_block_size);
++
++ /* Allocate blocks traversing LHS */
++ lhs_offset = drm_buddy_block_offset(block) - lhs_size;
++ err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size,
++ NULL, &blocks_lhs);
++ if (!err) {
++ list_splice(&blocks_lhs, blocks);
++ return 0;
++ } else if (err != -ENOSPC) {
++ drm_buddy_free_list_internal(mm, blocks);
++ return err;
++ }
++ /* Free blocks for the next iteration */
+ drm_buddy_free_list_internal(mm, blocks);
+- return err;
+- }
+- /* Free blocks for the next iteration */
+- drm_buddy_free_list_internal(mm, blocks);
+
+- iter = rb_prev(iter);
++ iter = rb_prev(iter);
++ }
+ }
+
+ return -ENOSPC;
+@@ -1238,11 +1273,17 @@ void drm_buddy_print(struct drm_buddy *m
+
+ for (order = mm->max_order; order >= 0; order--) {
+ struct drm_buddy_block *block, *tmp;
++ struct rb_root *root;
+ u64 count = 0, free;
++ unsigned int tree;
++
++ for_each_free_tree(tree) {
++ root = &mm->free_trees[tree][order];
+
+- rbtree_postorder_for_each_entry_safe(block, tmp, &mm->free_tree[order], rb) {
+- BUG_ON(!drm_buddy_block_is_free(block));
+- count++;
++ rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) {
++ BUG_ON(!drm_buddy_block_is_free(block));
++ count++;
++ }
+ }
+
+ drm_printf(p, "order-%2d ", order);
+--- a/include/drm/drm_buddy.h
++++ b/include/drm/drm_buddy.h
+@@ -73,7 +73,7 @@ struct drm_buddy_block {
+ */
+ struct drm_buddy {
+ /* Maintain a free list for each order. */
+- struct rb_root *free_tree;
++ struct rb_root **free_trees;
+
+ /*
+ * Maintain explicit binary tree(s) to track the allocation of the
--- /dev/null
+From 8b61583f993589a64c061aa91b44f5bd350d90a5 Mon Sep 17 00:00:00 2001
+From: Jani Nikula <jani.nikula@intel.com>
+Date: Tue, 28 Oct 2025 22:07:26 +0200
+Subject: drm/edid: add DRM_EDID_IDENT_INIT() to initialize struct drm_edid_ident
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jani Nikula <jani.nikula@intel.com>
+
+commit 8b61583f993589a64c061aa91b44f5bd350d90a5 upstream.
+
+Add a convenience helper for initializing struct drm_edid_ident.
+
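+A minimal usage sketch (the vendor characters, product code and name
+are made-up values):
+
+    static const struct drm_edid_ident ident =
+        DRM_EDID_IDENT_INIT('A', 'B', 'C', 0x1234, "Example panel");
+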
+Cc: Tiago Martins Araújo <tiago.martins.araujo@gmail.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Tested-by: Tiago Martins Araújo <tiago.martins.araujo@gmail.com>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/710b2ac6a211606ec1f90afa57b79e8c7375a27e.1761681968.git.jani.nikula@intel.com
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/drm/drm_edid.h | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/include/drm/drm_edid.h
++++ b/include/drm/drm_edid.h
+@@ -333,6 +333,12 @@ struct drm_edid_ident {
+ const char *name;
+ };
+
++#define DRM_EDID_IDENT_INIT(_vend_chr_0, _vend_chr_1, _vend_chr_2, _product_id, _name) \
++{ \
++ .panel_id = drm_edid_encode_panel_id(_vend_chr_0, _vend_chr_1, _vend_chr_2, _product_id), \
++ .name = _name, \
++}
++
+ #define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8))
+
+ /* Short Audio Descriptor */
--- /dev/null
+From be729f9de6c64240645dc80a24162ac4d3fe00a8 Mon Sep 17 00:00:00 2001
+From: Thomas Zimmermann <tzimmermann@suse.de>
+Date: Mon, 29 Sep 2025 10:23:23 +0200
+Subject: drm/gma500: Remove unused helper psb_fbdev_fb_setcolreg()
+
+From: Thomas Zimmermann <tzimmermann@suse.de>
+
+commit be729f9de6c64240645dc80a24162ac4d3fe00a8 upstream.
+
+Remove psb_fbdev_fb_setcolreg(), which hasn't been called in almost
+a decade.
+
+Gma500 commit 4d8d096e9ae8 ("gma500: introduce the framebuffer support
+code") added the helper psb_fbdev_fb_setcolreg() for setting the fbdev
+palette via fbdev's fb_setcolreg callback. Later
+commit 3da6c2f3b730 ("drm/gma500: use DRM_FB_HELPER_DEFAULT_OPS for
+fb_ops") set several default helpers for fbdev emulation, including
+fb_setcmap.
+
+The fbdev subsystem always prefers fb_setcmap over fb_setcolreg. [1]
+Hence, the gma500 code is no longer in use and gma500 has been using
+drm_fb_helper_setcmap() for several years without issues.
+
+Fixes: 3da6c2f3b730 ("drm/gma500: use DRM_FB_HELPER_DEFAULT_OPS for fb_ops")
+Cc: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+Cc: Stefan Christ <contact@stefanchrist.eu>
+Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
+Cc: dri-devel@lists.freedesktop.org
+Cc: <stable@vger.kernel.org> # v4.10+
+Link: https://elixir.bootlin.com/linux/v6.16.9/source/drivers/video/fbdev/core/fbcmap.c#L246 # [1]
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Acked-by: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+Link: https://lore.kernel.org/r/20250929082338.18845-1-tzimmermann@suse.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/gma500/fbdev.c | 43 -----------------------------------------
+ 1 file changed, 43 deletions(-)
+
+--- a/drivers/gpu/drm/gma500/fbdev.c
++++ b/drivers/gpu/drm/gma500/fbdev.c
+@@ -51,48 +51,6 @@ static const struct vm_operations_struct
+ * struct fb_ops
+ */
+
+-#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
+-
+-static int psb_fbdev_fb_setcolreg(unsigned int regno,
+- unsigned int red, unsigned int green,
+- unsigned int blue, unsigned int transp,
+- struct fb_info *info)
+-{
+- struct drm_fb_helper *fb_helper = info->par;
+- struct drm_framebuffer *fb = fb_helper->fb;
+- uint32_t v;
+-
+- if (!fb)
+- return -ENOMEM;
+-
+- if (regno > 255)
+- return 1;
+-
+- red = CMAP_TOHW(red, info->var.red.length);
+- blue = CMAP_TOHW(blue, info->var.blue.length);
+- green = CMAP_TOHW(green, info->var.green.length);
+- transp = CMAP_TOHW(transp, info->var.transp.length);
+-
+- v = (red << info->var.red.offset) |
+- (green << info->var.green.offset) |
+- (blue << info->var.blue.offset) |
+- (transp << info->var.transp.offset);
+-
+- if (regno < 16) {
+- switch (fb->format->cpp[0] * 8) {
+- case 16:
+- ((uint32_t *) info->pseudo_palette)[regno] = v;
+- break;
+- case 24:
+- case 32:
+- ((uint32_t *) info->pseudo_palette)[regno] = v;
+- break;
+- }
+- }
+-
+- return 0;
+-}
+-
+ static int psb_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+ {
+ if (vma->vm_pgoff != 0)
+@@ -137,7 +95,6 @@ static const struct fb_ops psb_fbdev_fb_
+ .owner = THIS_MODULE,
+ __FB_DEFAULT_IOMEM_OPS_RDWR,
+ DRM_FB_HELPER_DEFAULT_OPS,
+- .fb_setcolreg = psb_fbdev_fb_setcolreg,
+ __FB_DEFAULT_IOMEM_OPS_DRAW,
+ .fb_mmap = psb_fbdev_fb_mmap,
+ .fb_destroy = psb_fbdev_fb_destroy,
--- /dev/null
+From 1c7f9e528f8f488b060b786bfb90b40540854db3 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri, 5 Dec 2025 12:35:01 +0100
+Subject: drm/i915: Fix format string truncation warning
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+commit 1c7f9e528f8f488b060b786bfb90b40540854db3 upstream.
+
+GCC notices that the 16-byte uabi_name field could theoretically be too
+small for the formatted string once the instance number reaches three
+digits.
+
+So grow the field to 20 bytes.
+
+drivers/gpu/drm/i915/intel_memory_region.c: In function ‘intel_memory_region_create’:
+drivers/gpu/drm/i915/intel_memory_region.c:273:61: error: ‘%u’ directive output may be truncated writing between 1 and 5 bytes into a region of size between 3 and 11 [-Werror=format-truncation=]
+ 273 | snprintf(mem->uabi_name, sizeof(mem->uabi_name), "%s%u",
+ | ^~
+drivers/gpu/drm/i915/intel_memory_region.c:273:58: note: directive argument in the range [0, 65535]
+ 273 | snprintf(mem->uabi_name, sizeof(mem->uabi_name), "%s%u",
+ | ^~~~~~
+drivers/gpu/drm/i915/intel_memory_region.c:273:9: note: ‘snprintf’ output between 7 and 19 bytes into a destination of size 16
+ 273 | snprintf(mem->uabi_name, sizeof(mem->uabi_name), "%s%u",
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ 274 | intel_memory_type_str(type), instance);
+ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
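+The 20-byte bound follows directly from the compiler's arithmetic above:
+the longest region-name prefix it sees is 13 bytes, a u16 instance
+prints as at most 5 digits, plus the NUL terminator. A hypothetical
+compile-time check (not part of the patch) would be:
+
+```c
+/* 13-byte prefix + "65535" + '\0' = 19 <= 20 */
+static_assert(sizeof_field(struct intel_memory_region, uabi_name) >=
+	      13 + 5 + 1,
+	      "uabi_name must hold the longest formatted region name");
+```
+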
+Fixes: 3b38d3515753 ("drm/i915: Add stable memory region names")
+Cc: <stable@vger.kernel.org> # v6.8+
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Tvrtko Ursulin <tursulin@ursulin.net>
+Link: https://lore.kernel.org/r/20251205113500.684286-2-ardb@kernel.org
+(cherry picked from commit 18476087f1a18dc279d200d934ad94fba1fb51d5)
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/intel_memory_region.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/intel_memory_region.h
++++ b/drivers/gpu/drm/i915/intel_memory_region.h
+@@ -72,7 +72,7 @@ struct intel_memory_region {
+ u16 instance;
+ enum intel_region_id id;
+ char name[16];
+- char uabi_name[16];
++ char uabi_name[20];
+ bool private; /* not for userspace */
+
+ struct {
--- /dev/null
+From 4fe2bd195435e71c117983d87f278112c5ab364c Mon Sep 17 00:00:00 2001
+From: Krzysztof Niemiec <krzysztof.niemiec@intel.com>
+Date: Tue, 16 Dec 2025 19:09:01 +0100
+Subject: drm/i915/gem: Zero-initialize the eb.vma array in i915_gem_do_execbuffer
+
+From: Krzysztof Niemiec <krzysztof.niemiec@intel.com>
+
+commit 4fe2bd195435e71c117983d87f278112c5ab364c upstream.
+
+Initialize the eb.vma array with values of 0 when the eb structure is
+first set up. In particular, this sets the eb->vma[i].vma pointers to
+NULL, simplifying cleanup and getting rid of the bug described below.
+
+During the execution of eb_lookup_vmas(), the eb->vma array is
+successively filled up with struct eb_vma objects. This process includes
+calling eb_add_vma(), which might fail; however, even in the event of
+failure, eb->vma[i].vma is set for the currently processed buffer.
+
+If eb_add_vma() fails, eb_lookup_vmas() returns with an error, which
+prompts a call to eb_release_vmas() to clean up the mess. Since
+eb_lookup_vmas() might fail while processing any (possibly not the
+first) buffer, eb_release_vmas() checks whether a buffer's vma is NULL
+to know at what point the lookup function failed.
+
+In eb_lookup_vmas(), eb->vma[i].vma is set to NULL if either the helper
+function eb_lookup_vma() or eb_validate_vma() fails. eb->vma[i+1].vma is
+set to NULL in case i915_gem_object_userptr_submit_init() fails; the
+current one needs to be cleaned up by eb_release_vmas() at this point,
+so the next one is set. If eb_add_vma() fails, neither the current nor
+the next vma is set to NULL, which is a source of a NULL deref bug
+described in the issue linked in the Closes tag.
+
+When entering eb_lookup_vmas(), the vma pointers are set to the slab
+poison value instead of NULL. This doesn't matter for the actual lookup,
+since the value gets overwritten anyway; however, eb_release_vmas() only
+recognizes NULL as the stopping value, so the pointers had to be set to
+NULL one by one in case of an intermediate failure. This patch changes
+the approach to filling them all with NULL at the start instead, rather
+than handling that manually on each failure path.
+
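+The NULL sentinel that the cleanup path relies on can be pictured with
+the simplified loop below (illustrative only; the real
+eb_release_vmas() also deals with locks, flags and references):
+
+```c
+static void eb_release_vmas(struct i915_execbuffer *eb)
+{
+	unsigned int i;
+
+	for (i = 0; i < eb->buffer_count; i++) {
+		struct eb_vma *ev = &eb->vma[i];
+
+		/* a NULL vma marks the point where the lookup stopped */
+		if (!ev->vma)
+			break;
+
+		i915_vma_put(ev->vma);
+	}
+}
+```
+
+Zero-filling the whole array up front guarantees the sentinel exists no
+matter which step of the lookup fails.
+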
+Reported-by: Gangmin Kim <km.kim1503@gmail.com>
+Closes: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/15062
+Fixes: 544460c33821 ("drm/i915: Multi-BB execbuf")
+Cc: stable@vger.kernel.org # 5.16.x
+Signed-off-by: Krzysztof Niemiec <krzysztof.niemiec@intel.com>
+Reviewed-by: Janusz Krzysztofik <janusz.krzysztofik@linux.intel.com>
+Reviewed-by: Krzysztof Karas <krzysztof.karas@intel.com>
+Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
+Signed-off-by: Andi Shyti <andi.shyti@kernel.org>
+Link: https://lore.kernel.org/r/20251216180900.54294-2-krzysztof.niemiec@intel.com
+(cherry picked from commit 08889b706d4f0b8d2352b7ca29c2d8df4d0787cd)
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 37 +++++++++++--------------
+ 1 file changed, 17 insertions(+), 20 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+@@ -951,13 +951,13 @@ static int eb_lookup_vmas(struct i915_ex
+ vma = eb_lookup_vma(eb, eb->exec[i].handle);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+- goto err;
++ return err;
+ }
+
+ err = eb_validate_vma(eb, &eb->exec[i], vma);
+ if (unlikely(err)) {
+ i915_vma_put(vma);
+- goto err;
++ return err;
+ }
+
+ err = eb_add_vma(eb, ¤t_batch, i, vma);
+@@ -966,19 +966,8 @@ static int eb_lookup_vmas(struct i915_ex
+
+ if (i915_gem_object_is_userptr(vma->obj)) {
+ err = i915_gem_object_userptr_submit_init(vma->obj);
+- if (err) {
+- if (i + 1 < eb->buffer_count) {
+- /*
+- * Execbuffer code expects last vma entry to be NULL,
+- * since we already initialized this entry,
+- * set the next value to NULL or we mess up
+- * cleanup handling.
+- */
+- eb->vma[i + 1].vma = NULL;
+- }
+-
++ if (err)
+ return err;
+- }
+
+ eb->vma[i].flags |= __EXEC_OBJECT_USERPTR_INIT;
+ eb->args->flags |= __EXEC_USERPTR_USED;
+@@ -986,10 +975,6 @@ static int eb_lookup_vmas(struct i915_ex
+ }
+
+ return 0;
+-
+-err:
+- eb->vma[i].vma = NULL;
+- return err;
+ }
+
+ static int eb_lock_vmas(struct i915_execbuffer *eb)
+@@ -3374,7 +3359,8 @@ i915_gem_do_execbuffer(struct drm_device
+
+ eb.exec = exec;
+ eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1);
+- eb.vma[0].vma = NULL;
++ memset(eb.vma, 0, (args->buffer_count + 1) * sizeof(struct eb_vma));
++
+ eb.batch_pool = NULL;
+
+ eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
+@@ -3583,7 +3569,18 @@ i915_gem_execbuffer2_ioctl(struct drm_de
+ if (err)
+ return err;
+
+- /* Allocate extra slots for use by the command parser */
++ /*
++ * Allocate extra slots for use by the command parser.
++ *
++ * Note that this allocation handles two different arrays (the
++ * exec2_list array, and the eventual eb.vma array introduced in
++ * i915_gem_do_execbuffer()), that reside in virtually contiguous
++ * memory. Also note that the allocation intentionally doesn't fill the
++ * area with zeros, because the exec2_list part doesn't need to be, as
++ * it's immediately overwritten by user data a few lines below.
++ * However, the eb.vma part is explicitly zeroed later in
++ * i915_gem_do_execbuffer().
++ */
+ exec2_list = kvmalloc_array(count + 2, eb_element_size(),
+ __GFP_NOWARN | GFP_KERNEL);
+ if (exec2_list == NULL) {
--- /dev/null
+From 6b991ad8dc3abfe5720fc2e9ee96be63ae43e362 Mon Sep 17 00:00:00 2001
+From: Alessio Belle <alessio.belle@imgtec.com>
+Date: Mon, 8 Dec 2025 09:11:00 +0000
+Subject: drm/imagination: Disallow exporting of PM/FW protected objects
+
+From: Alessio Belle <alessio.belle@imgtec.com>
+
+commit 6b991ad8dc3abfe5720fc2e9ee96be63ae43e362 upstream.
+
+These objects are meant to be used by the GPU firmware or by the PM unit
+within the GPU, in which case they may contain physical addresses.
+
+This adds a layer of protection against exposing potentially exploitable
+information outside of the driver.
+
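+From userspace, the visible effect is that a PRIME export attempt on
+such an object now fails cleanly. A minimal sketch (standard DRM ioctl;
+drm_fd and bo_handle are hypothetical caller-provided values):
+
+```c
+#include <errno.h>
+#include <stdint.h>
+#include <sys/ioctl.h>
+#include <xf86drm.h>	/* struct drm_prime_handle, DRM_CLOEXEC */
+
+/* returns 0 if the export is (correctly) refused with EPERM */
+static int check_protected_bo_export(int drm_fd, uint32_t bo_handle)
+{
+	struct drm_prime_handle args = {
+		.handle = bo_handle,	/* DRM_PVR_BO_PM_FW_PROTECT object */
+		.flags = DRM_CLOEXEC,
+	};
+
+	if (ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args) == -1 &&
+	    errno == EPERM)
+		return 0;
+
+	return -1;
+}
+```
+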
+Fixes: ff5f643de0bf ("drm/imagination: Add GEM and VM related code")
+Signed-off-by: Alessio Belle <alessio.belle@imgtec.com>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20251208-no-export-pm-fw-obj-v1-1-83ab12c61693@imgtec.com
+Signed-off-by: Matt Coster <matt.coster@imgtec.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/imagination/pvr_gem.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/drivers/gpu/drm/imagination/pvr_gem.c
++++ b/drivers/gpu/drm/imagination/pvr_gem.c
+@@ -27,6 +27,16 @@ static void pvr_gem_object_free(struct d
+ drm_gem_shmem_object_free(obj);
+ }
+
++static struct dma_buf *pvr_gem_export(struct drm_gem_object *obj, int flags)
++{
++ struct pvr_gem_object *pvr_obj = gem_to_pvr_gem(obj);
++
++ if (pvr_obj->flags & DRM_PVR_BO_PM_FW_PROTECT)
++ return ERR_PTR(-EPERM);
++
++ return drm_gem_prime_export(obj, flags);
++}
++
+ static int pvr_gem_mmap(struct drm_gem_object *gem_obj, struct vm_area_struct *vma)
+ {
+ struct pvr_gem_object *pvr_obj = gem_to_pvr_gem(gem_obj);
+@@ -41,6 +51,7 @@ static int pvr_gem_mmap(struct drm_gem_o
+ static const struct drm_gem_object_funcs pvr_gem_object_funcs = {
+ .free = pvr_gem_object_free,
+ .print_info = drm_gem_shmem_object_print_info,
++ .export = pvr_gem_export,
+ .pin = drm_gem_shmem_object_pin,
+ .unpin = drm_gem_shmem_object_unpin,
+ .get_sg_table = drm_gem_shmem_object_get_sg_table,
--- /dev/null
+From a846505a193d7492ad3531e33cacfca31e4bcdd1 Mon Sep 17 00:00:00 2001
+From: Miaoqian Lin <linmq006@gmail.com>
+Date: Wed, 29 Oct 2025 15:23:06 +0800
+Subject: drm/mediatek: Fix device node reference leak in mtk_dp_dt_parse()
+
+From: Miaoqian Lin <linmq006@gmail.com>
+
+commit a846505a193d7492ad3531e33cacfca31e4bcdd1 upstream.
+
+The function mtk_dp_dt_parse() calls of_graph_get_endpoint_by_regs()
+to get the endpoint device node, but fails to call of_node_put() to release
+the reference when the function returns. This results in a device node
+reference leak.
+
+Fix this by adding the missing of_node_put() call before returning from
+the function.
+
+Found via static analysis and code review.
+
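+The rule being applied is the usual OF reference-counting one: every
+of_graph_get_*() style getter returns a node with an elevated refcount
+that must be dropped exactly once. In sketch form (condensed from the
+function being fixed):
+
+```c
+struct device_node *endpoint;
+int len;
+
+endpoint = of_graph_get_endpoint_by_regs(pdev->dev.of_node, 1, -1);
+len = of_property_count_elems_of_size(endpoint, "data-lanes",
+				      sizeof(u32));
+of_node_put(endpoint);	/* NULL-safe; drop the ref on every path */
+```
+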
+Fixes: f70ac097a2cf ("drm/mediatek: Add MT8195 Embedded DisplayPort driver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Miaoqian Lin <linmq006@gmail.com>
+Reviewed-by: Markus Schneider-Pargmann <msp@baylibre.com>
+Reviewed-by: CK Hu <ck.hu@mediatek.com>
+Link: https://patchwork.kernel.org/project/dri-devel/patch/20251029072307.10955-1-linmq006@gmail.com/
+Signed-off-by: Chun-Kuang Hu <chunkuang.hu@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/mediatek/mtk_dp.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/gpu/drm/mediatek/mtk_dp.c
++++ b/drivers/gpu/drm/mediatek/mtk_dp.c
+@@ -2067,6 +2067,7 @@ static int mtk_dp_dt_parse(struct mtk_dp
+ endpoint = of_graph_get_endpoint_by_regs(pdev->dev.of_node, 1, -1);
+ len = of_property_count_elems_of_size(endpoint,
+ "data-lanes", sizeof(u32));
++ of_node_put(endpoint);
+ if (len < 0 || len > 4 || len == 3) {
+ dev_err(dev, "invalid data lane size: %d\n", len);
+ return -EINVAL;
--- /dev/null
+From 2a2a04be8e869a19c9f950b89b1e05832a0f7ec7 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Tue, 23 Sep 2025 17:23:38 +0200
+Subject: drm/mediatek: Fix probe device leaks
+
+From: Johan Hovold <johan@kernel.org>
+
+commit 2a2a04be8e869a19c9f950b89b1e05832a0f7ec7 upstream.
+
+Make sure to drop the reference taken to each component device during
+probe on probe failure (e.g. probe deferral) and on driver unbind.
+
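+The fix uses the common devm pattern for balancing a get with a put on
+all exit paths; schematically (matching the hunk below):
+
+```c
+static void mtk_ddp_comp_put_device(void *_dev)
+{
+	put_device(_dev);
+}
+
+/* after taking the device reference: */
+ret = devm_add_action_or_reset(dev, mtk_ddp_comp_put_device, comp->dev);
+if (ret)
+	return ret;	/* on failure the action has already run */
+```
+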
+Fixes: 6ea6f8276725 ("drm/mediatek: Use correct device pointer to get CMDQ client register")
+Cc: stable@vger.kernel.org # 5.12
+Cc: Chun-Kuang Hu <chunkuang.hu@kernel.org>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Link: https://patchwork.kernel.org/project/dri-devel/patch/20250923152340.18234-4-johan@kernel.org/
+Signed-off-by: Chun-Kuang Hu <chunkuang.hu@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/mediatek/mtk_ddp_comp.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
++++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
+@@ -621,6 +621,13 @@ int mtk_find_possible_crtcs(struct drm_d
+ return ret;
+ }
+
++static void mtk_ddp_comp_put_device(void *_dev)
++{
++ struct device *dev = _dev;
++
++ put_device(dev);
++}
++
+ static void mtk_ddp_comp_clk_put(void *_clk)
+ {
+ struct clk *clk = _clk;
+@@ -656,6 +663,10 @@ int mtk_ddp_comp_init(struct device *dev
+ }
+ comp->dev = &comp_pdev->dev;
+
++ ret = devm_add_action_or_reset(dev, mtk_ddp_comp_put_device, comp->dev);
++ if (ret)
++ return ret;
++
+ if (type == MTK_DISP_AAL ||
+ type == MTK_DISP_BLS ||
+ type == MTK_DISP_CCORR ||
--- /dev/null
+From 5e49200593f331cd0629b5376fab9192f698e8ef Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Tue, 23 Sep 2025 17:23:37 +0200
+Subject: drm/mediatek: Fix probe memory leak
+
+From: Johan Hovold <johan@kernel.org>
+
+commit 5e49200593f331cd0629b5376fab9192f698e8ef upstream.
+
+The Mediatek DRM driver allocates private data for components without a
+platform driver but as the lifetime is tied to each component device,
+the memory is never freed.
+
+Tie the allocation lifetime to the DRM platform device so that the
+memory is released on probe failure (e.g. probe deferral) and when the
+driver is unbound.
+
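+A devm allocation is released when the device it was registered against
+is unbound, so the allocation has to be tied to the DRM platform device
+rather than the component device:
+
+```c
+/* tie the lifetime to the DRM platform device (dev), not to
+ * comp->dev, whose devres is never released here
+ */
+priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+if (!priv)
+	return -ENOMEM;
+```
+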
+Fixes: c0d36de868a6 ("drm/mediatek: Move clk info from struct mtk_ddp_comp to sub driver private data")
+Cc: stable@vger.kernel.org # 5.12
+Cc: CK Hu <ck.hu@mediatek.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Link: https://patchwork.kernel.org/project/dri-devel/patch/20250923152340.18234-3-johan@kernel.org/
+Signed-off-by: Chun-Kuang Hu <chunkuang.hu@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/mediatek/mtk_ddp_comp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
++++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
+@@ -671,7 +671,7 @@ int mtk_ddp_comp_init(struct device *dev
+ type == MTK_DSI)
+ return 0;
+
+- priv = devm_kzalloc(comp->dev, sizeof(*priv), GFP_KERNEL);
++ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
--- /dev/null
+From 07c7c640a8eb9e196f357d15d88a59602a947197 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Tue, 23 Sep 2025 17:23:36 +0200
+Subject: drm/mediatek: Fix probe resource leaks
+
+From: Johan Hovold <johan@kernel.org>
+
+commit 07c7c640a8eb9e196f357d15d88a59602a947197 upstream.
+
+Make sure to unmap and release the component iomap and clock on probe
+failure (e.g. probe deferral) and on driver unbind.
+
+Note that unlike of_iomap(), devm_of_iomap() also checks whether the
+region is already mapped.
+
+Fixes: 119f5173628a ("drm/mediatek: Add DRM Driver for Mediatek SoC MT8173.")
+Cc: stable@vger.kernel.org # 4.7
+Cc: CK Hu <ck.hu@mediatek.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Link: https://patchwork.kernel.org/project/dri-devel/patch/20250923152340.18234-2-johan@kernel.org/
+Signed-off-by: Chun-Kuang Hu <chunkuang.hu@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/mediatek/mtk_ddp_comp.c | 20 ++++++++++++++++----
+ drivers/gpu/drm/mediatek/mtk_ddp_comp.h | 2 +-
+ drivers/gpu/drm/mediatek/mtk_drm_drv.c | 4 ++--
+ 3 files changed, 19 insertions(+), 7 deletions(-)
+
+--- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
++++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
+@@ -621,15 +621,20 @@ int mtk_find_possible_crtcs(struct drm_d
+ return ret;
+ }
+
+-int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp,
++static void mtk_ddp_comp_clk_put(void *_clk)
++{
++ struct clk *clk = _clk;
++
++ clk_put(clk);
++}
++
++int mtk_ddp_comp_init(struct device *dev, struct device_node *node, struct mtk_ddp_comp *comp,
+ unsigned int comp_id)
+ {
+ struct platform_device *comp_pdev;
+ enum mtk_ddp_comp_type type;
+ struct mtk_ddp_comp_dev *priv;
+-#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ int ret;
+-#endif
+
+ if (comp_id >= DDP_COMPONENT_DRM_ID_MAX)
+ return -EINVAL;
+@@ -670,11 +675,18 @@ int mtk_ddp_comp_init(struct device_node
+ if (!priv)
+ return -ENOMEM;
+
+- priv->regs = of_iomap(node, 0);
++ priv->regs = devm_of_iomap(dev, node, 0, NULL);
++ if (IS_ERR(priv->regs))
++ return PTR_ERR(priv->regs);
++
+ priv->clk = of_clk_get(node, 0);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
++ ret = devm_add_action_or_reset(dev, mtk_ddp_comp_clk_put, priv->clk);
++ if (ret)
++ return ret;
++
+ #if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ ret = cmdq_dev_get_client_reg(comp->dev, &priv->cmdq_reg, 0);
+ if (ret)
+--- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h
++++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h
+@@ -350,7 +350,7 @@ static inline void mtk_ddp_comp_encoder_
+ int mtk_ddp_comp_get_id(struct device_node *node,
+ enum mtk_ddp_comp_type comp_type);
+ int mtk_find_possible_crtcs(struct drm_device *drm, struct device *dev);
+-int mtk_ddp_comp_init(struct device_node *comp_node, struct mtk_ddp_comp *comp,
++int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node, struct mtk_ddp_comp *comp,
+ unsigned int comp_id);
+ enum mtk_ddp_comp_type mtk_ddp_comp_get_type(unsigned int comp_id);
+ void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value,
+--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+@@ -874,7 +874,7 @@ static int mtk_drm_probe(struct platform
+ (void *)private->mmsys_dev,
+ sizeof(*private->mmsys_dev));
+ private->ddp_comp[DDP_COMPONENT_DRM_OVL_ADAPTOR].dev = &ovl_adaptor->dev;
+- mtk_ddp_comp_init(NULL, &private->ddp_comp[DDP_COMPONENT_DRM_OVL_ADAPTOR],
++ mtk_ddp_comp_init(dev, NULL, &private->ddp_comp[DDP_COMPONENT_DRM_OVL_ADAPTOR],
+ DDP_COMPONENT_DRM_OVL_ADAPTOR);
+ component_match_add(dev, &match, compare_dev, &ovl_adaptor->dev);
+ }
+@@ -943,7 +943,7 @@ static int mtk_drm_probe(struct platform
+ node);
+ }
+
+- ret = mtk_ddp_comp_init(node, &private->ddp_comp[comp_id], comp_id);
++ ret = mtk_ddp_comp_init(dev, node, &private->ddp_comp[comp_id], comp_id);
+ if (ret) {
+ of_node_put(node);
+ goto err_node;
--- /dev/null
+From e0f44f74ed6313e50b38eb39a2c7f210ae208db2 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Tue, 23 Sep 2025 17:23:40 +0200
+Subject: drm/mediatek: ovl_adaptor: Fix probe device leaks
+
+From: Johan Hovold <johan@kernel.org>
+
+commit e0f44f74ed6313e50b38eb39a2c7f210ae208db2 upstream.
+
+Make sure to drop the references taken to the component devices by
+of_find_device_by_node() during probe on probe failure (e.g. probe
+deferral) and on driver unbind.
+
+Fixes: 453c3364632a ("drm/mediatek: Add ovl_adaptor support for MT8195")
+Cc: stable@vger.kernel.org # 6.4
+Cc: Nancy.Lin <nancy.lin@mediatek.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Link: https://patchwork.kernel.org/project/dri-devel/patch/20250923152340.18234-6-johan@kernel.org/
+Signed-off-by: Chun-Kuang Hu <chunkuang.hu@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
++++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
+@@ -497,6 +497,13 @@ static int compare_of(struct device *dev
+ return dev->of_node == data;
+ }
+
++static void ovl_adaptor_put_device(void *_dev)
++{
++ struct device *dev = _dev;
++
++ put_device(dev);
++}
++
+ static int ovl_adaptor_comp_init(struct device *dev, struct component_match **match)
+ {
+ struct mtk_disp_ovl_adaptor *priv = dev_get_drvdata(dev);
+@@ -532,6 +539,11 @@ static int ovl_adaptor_comp_init(struct
+ if (!comp_pdev)
+ return -EPROBE_DEFER;
+
++ ret = devm_add_action_or_reset(dev, ovl_adaptor_put_device,
++ &comp_pdev->dev);
++ if (ret)
++ return ret;
++
+ priv->ovl_adaptor_comp[id] = &comp_pdev->dev;
+
+ drm_of_component_match_add(dev, match, compare_of, node);
--- /dev/null
+From 6cb31fba137d45e682ce455b8ea364f44d5d4f98 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Ren=C3=A9=20Rebe?= <rene@exactco.de>
+Date: Mon, 8 Dec 2025 14:18:27 +0100
+Subject: drm/mgag200: Fix big-endian support
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: René Rebe <rene@exactco.de>
+
+commit 6cb31fba137d45e682ce455b8ea364f44d5d4f98 upstream.
+
+Unlike the original, since-deleted Matrox mga driver, the new mgag200
+driver leaves the XRGB frame-buffer byte-swapped on big-endian "RISC"
+systems. Fix this by enabling the byte-swapping "PowerPC" OPMODE for
+any __BIG_ENDIAN config.
+
+Fixes: 414c45310625 ("mgag200: initial g200se driver (v2)")
+Signed-off-by: René Rebe <rene@exactco.de>
+Cc: stable@kernel.org
+Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Link: https://patch.msgid.link/20251208.141827.965103015954471168.rene@exactco.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/mgag200/mgag200_mode.c | 25 +++++++++++++++++++++++++
+ 1 file changed, 25 insertions(+)
+
+--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
++++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
+@@ -175,6 +175,30 @@ static void mgag200_set_startadd(struct
+ WREG_ECRT(0x00, crtcext0);
+ }
+
++/*
++ * Set the opmode for the hardware swapper for Big-Endian processor
++ * support for the frame buffer aperture and DMAWIN space.
++ */
++static void mgag200_set_datasiz(struct mga_device *mdev, u32 format)
++{
++#if defined(__BIG_ENDIAN)
++ u32 opmode = RREG32(MGAREG_OPMODE);
++
++ opmode &= ~(GENMASK(17, 16) | GENMASK(9, 8) | GENMASK(3, 2));
++
++ /* Big-endian byte-swapping */
++ switch (format) {
++ case DRM_FORMAT_RGB565:
++ opmode |= 0x10100;
++ break;
++ case DRM_FORMAT_XRGB8888:
++ opmode |= 0x20200;
++ break;
++ }
++ WREG32(MGAREG_OPMODE, opmode);
++#endif
++}
++
+ void mgag200_init_registers(struct mga_device *mdev)
+ {
+ u8 crtc11, misc;
+@@ -510,6 +534,7 @@ void mgag200_primary_plane_helper_atomic
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect damage;
+
++ mgag200_set_datasiz(mdev, fb->format->format);
+ drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ mgag200_handle_damage(mdev, shadow_plane_state->data, fb, &damage);
--- /dev/null
+From 779b68a5bf2764c8ed3aa800e41ba0d5d007e1e7 Mon Sep 17 00:00:00 2001
+From: Akhil P Oommen <akhilpo@oss.qualcomm.com>
+Date: Tue, 18 Nov 2025 14:20:28 +0530
+Subject: drm/msm/a6xx: Fix out of bound IO access in a6xx_get_gmu_registers
+
+From: Akhil P Oommen <akhilpo@oss.qualcomm.com>
+
+commit 779b68a5bf2764c8ed3aa800e41ba0d5d007e1e7 upstream.
+
+The REG_A6XX_GMU_AO_AHB_FENCE_CTRL register falls within the GMU's
+register range, so use the gmu_write() routine to write to it.
+
+Fixes: 1707add81551 ("drm/msm/a6xx: Add a6xx gpu state")
+Cc: stable@vger.kernel.org
+Signed-off-by: Akhil P Oommen <akhilpo@oss.qualcomm.com>
+Reviewed-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
+Patchwork: https://patchwork.freedesktop.org/patch/688993/
+Message-ID: <20251118-kaana-gpu-support-v4-1-86eeb8e93fb6@oss.qualcomm.com>
+Signed-off-by: Rob Clark <robin.clark@oss.qualcomm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+@@ -1231,7 +1231,7 @@ static void a6xx_get_gmu_registers(struc
+ return;
+
+ /* Set the fence to ALLOW mode so we can access the registers */
+- gpu_write(gpu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
++ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
+
+ _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[2],
+ &a6xx_state->gmu_registers[2], false);
--- /dev/null
+From 88733a0b64872357e5ecd82b7488121503cb9cc6 Mon Sep 17 00:00:00 2001
+From: Nikolay Kuratov <kniv@yandex-team.ru>
+Date: Thu, 11 Dec 2025 12:36:30 +0300
+Subject: drm/msm/dpu: Add missing NULL pointer check for pingpong interface
+
+From: Nikolay Kuratov <kniv@yandex-team.ru>
+
+commit 88733a0b64872357e5ecd82b7488121503cb9cc6 upstream.
+
+It is checked almost always in dpu_encoder_phys_wb_setup_ctl(), but in a
+single place the check is missing.
+Also use convenient locals instead of phys_enc->* where available.
+
+Cc: stable@vger.kernel.org
+Fixes: d7d0e73f7de33 ("drm/msm/dpu: introduce the dpu_encoder_phys_* for writeback")
+Signed-off-by: Nikolay Kuratov <kniv@yandex-team.ru>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Patchwork: https://patchwork.freedesktop.org/patch/693860/
+Link: https://lore.kernel.org/r/20251211093630.171014-1-kniv@yandex-team.ru
+Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+@@ -243,14 +243,12 @@ static void dpu_encoder_phys_wb_setup_ct
+ if (hw_cdm)
+ intf_cfg.cdm = hw_cdm->idx;
+
+- if (phys_enc->hw_pp->merge_3d && phys_enc->hw_pp->merge_3d->ops.setup_3d_mode)
+- phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
+- mode_3d);
++ if (hw_pp && hw_pp->merge_3d && hw_pp->merge_3d->ops.setup_3d_mode)
++ hw_pp->merge_3d->ops.setup_3d_mode(hw_pp->merge_3d, mode_3d);
+
+ /* setup which pp blk will connect to this wb */
+- if (hw_pp && phys_enc->hw_wb->ops.bind_pingpong_blk)
+- phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb,
+- phys_enc->hw_pp->idx);
++ if (hw_pp && hw_wb->ops.bind_pingpong_blk)
++ hw_wb->ops.bind_pingpong_blk(hw_wb, hw_pp->idx);
+
+ phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
+ } else if (phys_enc->hw_ctl && phys_enc->hw_ctl->ops.setup_intf_cfg) {
--- /dev/null
+From 560271e10b2c86e95ea35afa9e79822e4847f07a Mon Sep 17 00:00:00 2001
+From: Lyude Paul <lyude@redhat.com>
+Date: Thu, 11 Dec 2025 14:02:54 -0500
+Subject: drm/nouveau/dispnv50: Don't call drm_atomic_get_crtc_state() in prepare_fb
+
+From: Lyude Paul <lyude@redhat.com>
+
+commit 560271e10b2c86e95ea35afa9e79822e4847f07a upstream.
+
+Since we recently started warning about uses of this function after the
+atomic check phase completes, we've started getting warnings about this in
+nouveau. It appears a misplaced drm_atomic_get_crtc_state() call has been
+hiding in our .prepare_fb callback for a while.
+
+So, fix this by adding a new nv50_head_atom_get_new() function and use that
+in our .prepare_fb callback instead.
+
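+The distinction matters because drm_atomic_get_crtc_state() may take
+locks and allocate a new state, which is only legal during the atomic
+check phase, while drm_atomic_get_new_crtc_state() is a plain lookup
+that is safe afterwards and returns NULL when the CRTC is not part of
+the commit:
+
+```c
+/* lookup-only: no locking, no allocation; NULL if the CRTC is
+ * untouched by this atomic state
+ */
+struct drm_crtc_state *statec =
+	drm_atomic_get_new_crtc_state(state, crtc);
+```
+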
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Reviewed-by: Dave Airlie <airlied@redhat.com>
+Fixes: 1590700d94ac ("drm/nouveau/kms/nv50-: split each resource type into their own source files")
+Cc: <stable@vger.kernel.org> # v4.18+
+Link: https://patch.msgid.link/20251211190256.396742-1-lyude@redhat.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/nouveau/dispnv50/atom.h | 13 +++++++++++++
+ drivers/gpu/drm/nouveau/dispnv50/wndw.c | 2 +-
+ 2 files changed, 14 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/nouveau/dispnv50/atom.h
++++ b/drivers/gpu/drm/nouveau/dispnv50/atom.h
+@@ -152,8 +152,21 @@ static inline struct nv50_head_atom *
+ nv50_head_atom_get(struct drm_atomic_state *state, struct drm_crtc *crtc)
+ {
+ struct drm_crtc_state *statec = drm_atomic_get_crtc_state(state, crtc);
++
+ if (IS_ERR(statec))
+ return (void *)statec;
++
++ return nv50_head_atom(statec);
++}
++
++static inline struct nv50_head_atom *
++nv50_head_atom_get_new(struct drm_atomic_state *state, struct drm_crtc *crtc)
++{
++ struct drm_crtc_state *statec = drm_atomic_get_new_crtc_state(state, crtc);
++
++ if (!statec)
++ return NULL;
++
+ return nv50_head_atom(statec);
+ }
+
+--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+@@ -567,7 +567,7 @@ nv50_wndw_prepare_fb(struct drm_plane *p
+ asyw->image.offset[0] = nvbo->offset;
+
+ if (wndw->func->prepare) {
+- asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
++ asyh = nv50_head_atom_get_new(asyw->state.state, asyw->state.crtc);
+ if (IS_ERR(asyh))
+ return PTR_ERR(asyh);
+
--- /dev/null
+From 491adc6a0f9903c32b05f284df1148de39e8e644 Mon Sep 17 00:00:00 2001
+From: Simon Richter <Simon.Richter@hogyros.de>
+Date: Tue, 14 Oct 2025 01:11:33 +0900
+Subject: drm/ttm: Avoid NULL pointer deref for evicted BOs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Simon Richter <Simon.Richter@hogyros.de>
+
+commit 491adc6a0f9903c32b05f284df1148de39e8e644 upstream.
+
+It is possible for a BO to exist that is not currently associated with a
+resource, e.g. because it has been evicted.
+
+When devcoredump tries to read the contents of all BOs for dumping, we need
+to expect this as well -- in this case, ENODATA is recorded instead of the
+buffer contents.
+
+Fixes: 7d08df5d0bd3 ("drm/ttm: Add ttm_bo_access")
+Fixes: 09ac4fcb3f25 ("drm/ttm: Implement vm_operations_struct.access v2")
+Cc: stable <stable@kernel.org>
+Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/6271
+Signed-off-by: Simon Richter <Simon.Richter@hogyros.de>
+Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+Reviewed-by: Shuicheng Lin <shuicheng.lin@intel.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://patch.msgid.link/20251013161241.709916-1-Simon.Richter@hogyros.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/ttm/ttm_bo_vm.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
++++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
+@@ -421,6 +421,11 @@ int ttm_bo_vm_access(struct vm_area_stru
+ if (ret)
+ return ret;
+
++ if (!bo->resource) {
++ ret = -ENODATA;
++ goto unlock;
++ }
++
+ switch (bo->resource->mem_type) {
+ case TTM_PL_SYSTEM:
+ fallthrough;
+@@ -435,6 +440,7 @@ int ttm_bo_vm_access(struct vm_area_stru
+ ret = -EIO;
+ }
+
++unlock:
+ ttm_bo_unreserve(bo);
+
+ return ret;
--- /dev/null
+From 6f0f404bd289d79a260b634c5b3f4d330b13472c Mon Sep 17 00:00:00 2001
+From: Matthew Brost <matthew.brost@intel.com>
+Date: Fri, 12 Dec 2025 10:28:41 -0800
+Subject: drm/xe: Adjust long-running workload timeslices to reasonable values
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Matthew Brost <matthew.brost@intel.com>
+
+commit 6f0f404bd289d79a260b634c5b3f4d330b13472c upstream.
+
+A 10ms timeslice for long-running workloads is far too long and causes
+significant jitter in benchmarks when the system is shared. Adjust the
+value to 5ms for preempt-fencing VMs, as the resume step there is
+quite costly because memory is moved around, and set it to zero for
+pagefault VMs,
+since switching back to pagefault mode after dma-fence mode is
+relatively fast.
+
+Also change min_run_period_ms to 'unsigned int' rather than 's64', as
+only non-negative values make sense.
+
+Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
+Cc: stable@vger.kernel.org
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Link: https://patch.msgid.link/20251212182847.1683222-2-matthew.brost@intel.com
+(cherry picked from commit 33a5abd9a68394aa67f9618b20eee65ee8702ff4)
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_vm.c | 5 ++++-
+ drivers/gpu/drm/xe/xe_vm_types.h | 2 +-
+ 2 files changed, 5 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/xe/xe_vm.c
++++ b/drivers/gpu/drm/xe/xe_vm.c
+@@ -1468,7 +1468,10 @@ struct xe_vm *xe_vm_create(struct xe_dev
+ INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
+
+ INIT_LIST_HEAD(&vm->preempt.exec_queues);
+- vm->preempt.min_run_period_ms = 10; /* FIXME: Wire up to uAPI */
++ if (flags & XE_VM_FLAG_FAULT_MODE)
++ vm->preempt.min_run_period_ms = 0;
++ else
++ vm->preempt.min_run_period_ms = 5;
+
+ for_each_tile(tile, xe, id)
+ xe_range_fence_tree_init(&vm->rftree[id]);
+--- a/drivers/gpu/drm/xe/xe_vm_types.h
++++ b/drivers/gpu/drm/xe/xe_vm_types.h
+@@ -243,7 +243,7 @@ struct xe_vm {
+ * @min_run_period_ms: The minimum run period before preempting
+ * an engine again
+ */
+- s64 min_run_period_ms;
++ unsigned int min_run_period_ms;
+ /** @exec_queues: list of exec queues attached to this VM */
+ struct list_head exec_queues;
+ /** @num_exec_queues: number exec queues attached to this VM */
--- /dev/null
+From 449bcd5d45eb4ce26740f11f8601082fe734bed2 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Thomas=20Hellstr=C3=B6m?= <thomas.hellstrom@linux.intel.com>
+Date: Tue, 9 Dec 2025 21:49:20 +0100
+Subject: drm/xe/bo: Don't include the CCS metadata in the dma-buf sg-table
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+
+commit 449bcd5d45eb4ce26740f11f8601082fe734bed2 upstream.
+
+Some Xe bos are allocated with extra backing-store for the CCS
+metadata. It's never been the intention to share the CCS metadata
+when exporting such bos as dma-buf. Don't include it in the
+dma-buf sg-table.
+
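+In other words, the exported sg-table has to be sized from the
+user-visible GEM size rather than from the TTM backing store, which may
+carry extra CCS pages at the tail; schematically:
+
+```c
+/* pages an importer may see vs. pages backing the BO */
+unsigned long exported_pages = obj->size >> PAGE_SHIFT;
+unsigned long backing_pages = bo->ttm.ttm->num_pages;
+/* backing_pages >= exported_pages when CCS metadata is appended */
+```
+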
+Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
+Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Cc: <stable@vger.kernel.org> # v6.8+
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+Reviewed-by: Karol Wachowski <karol.wachowski@linux.intel.com>
+Link: https://patch.msgid.link/20251209204920.224374-1-thomas.hellstrom@linux.intel.com
+(cherry picked from commit a4ebfb9d95d78a12512b435a698ee6886d712571)
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_dma_buf.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/xe/xe_dma_buf.c
++++ b/drivers/gpu/drm/xe/xe_dma_buf.c
+@@ -111,7 +111,7 @@ static struct sg_table *xe_dma_buf_map(s
+ case XE_PL_TT:
+ sgt = drm_prime_pages_to_sg(obj->dev,
+ bo->ttm.ttm->pages,
+- bo->ttm.ttm->num_pages);
++ obj->size >> PAGE_SHIFT);
+ if (IS_ERR(sgt))
+ return sgt;
+
--- /dev/null
+From fe3ccd24138fd391ae8e32289d492c85f67770fc Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Thomas=20Hellstr=C3=B6m?= <thomas.hellstrom@linux.intel.com>
+Date: Wed, 17 Dec 2025 10:34:41 +0100
+Subject: drm/xe: Drop preempt-fences when destroying imported dma-bufs.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+
+commit fe3ccd24138fd391ae8e32289d492c85f67770fc upstream.
+
+When imported dma-bufs are destroyed, TTM is not fully
+individualizing the dma-resv, but it *is* copying the fences that
+need to be waited for before declaring idle. So in the case where
+the bo->resv != bo->_resv we can still drop the preempt-fences, but
+make sure we do that on bo->_resv which contains the fence-pointer
+copy.
+
+In the case where the copying fails, bo->_resv will typically not
+contain any fences pointers at all, so there will be nothing to
+drop. In that case, TTM would have ensured all fences that would
+have been copied are signaled, including any remaining preempt
+fences.
+
+Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
+Fixes: fa0af721bd1f ("drm/ttm: test private resv obj on release/destroy")
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: <stable@vger.kernel.org> # v6.16+
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Tested-by: Matthew Brost <matthew.brost@intel.com>
+Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://patch.msgid.link/20251217093441.5073-1-thomas.hellstrom@linux.intel.com
+(cherry picked from commit 425fe550fb513b567bd6d01f397d274092a9c274)
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_bo.c | 15 ++++-----------
+ 1 file changed, 4 insertions(+), 11 deletions(-)
+
+--- a/drivers/gpu/drm/xe/xe_bo.c
++++ b/drivers/gpu/drm/xe/xe_bo.c
+@@ -1041,7 +1041,7 @@ static bool xe_ttm_bo_lock_in_destructor
+ * always succeed here, as long as we hold the lru lock.
+ */
+ spin_lock(&ttm_bo->bdev->lru_lock);
+- locked = dma_resv_trylock(ttm_bo->base.resv);
++ locked = dma_resv_trylock(&ttm_bo->base._resv);
+ spin_unlock(&ttm_bo->bdev->lru_lock);
+ xe_assert(xe, locked);
+
+@@ -1061,13 +1061,6 @@ static void xe_ttm_bo_release_notify(str
+ bo = ttm_to_xe_bo(ttm_bo);
+ xe_assert(xe_bo_device(bo), !(bo->created && kref_read(&ttm_bo->base.refcount)));
+
+- /*
+- * Corner case where TTM fails to allocate memory and this BOs resv
+- * still points the VMs resv
+- */
+- if (ttm_bo->base.resv != &ttm_bo->base._resv)
+- return;
+-
+ if (!xe_ttm_bo_lock_in_destructor(ttm_bo))
+ return;
+
+@@ -1077,14 +1070,14 @@ static void xe_ttm_bo_release_notify(str
+ * TODO: Don't do this for external bos once we scrub them after
+ * unbind.
+ */
+- dma_resv_for_each_fence(&cursor, ttm_bo->base.resv,
++ dma_resv_for_each_fence(&cursor, &ttm_bo->base._resv,
+ DMA_RESV_USAGE_BOOKKEEP, fence) {
+ if (xe_fence_is_xe_preempt(fence) &&
+ !dma_fence_is_signaled(fence)) {
+ if (!replacement)
+ replacement = dma_fence_get_stub();
+
+- dma_resv_replace_fences(ttm_bo->base.resv,
++ dma_resv_replace_fences(&ttm_bo->base._resv,
+ fence->context,
+ replacement,
+ DMA_RESV_USAGE_BOOKKEEP);
+@@ -1092,7 +1085,7 @@ static void xe_ttm_bo_release_notify(str
+ }
+ dma_fence_put(replacement);
+
+- dma_resv_unlock(ttm_bo->base.resv);
++ dma_resv_unlock(&ttm_bo->base._resv);
+ }
+
+ static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo)
--- /dev/null
+From 3595114bc31d1eb5e1996164c901485c1ffac6f7 Mon Sep 17 00:00:00 2001
+From: Ashutosh Dixit <ashutosh.dixit@intel.com>
+Date: Thu, 11 Dec 2025 22:18:49 -0800
+Subject: drm/xe/oa: Disallow 0 OA property values
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ashutosh Dixit <ashutosh.dixit@intel.com>
+
+commit 3595114bc31d1eb5e1996164c901485c1ffac6f7 upstream.
+
+An OA property value of 0 is invalid and will cause a NULL pointer
+dereference.
+
+Reported-by: Peter Senna Tschudin <peter.senna@linux.intel.com>
+Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/6452
+Fixes: cc4e6994d5a2 ("drm/xe/oa: Move functions up so they can be reused for config ioctl")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
+Reviewed-by: Harish Chegondi <harish.chegondi@intel.com>
+Link: https://patch.msgid.link/20251212061850.1565459-3-ashutosh.dixit@intel.com
+(cherry picked from commit 7a100e6ddcc47c1f6ba7a19402de86ce24790621)
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_oa.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/xe/xe_oa.c
++++ b/drivers/gpu/drm/xe/xe_oa.c
+@@ -1266,7 +1266,7 @@ static int xe_oa_user_ext_set_property(s
+ ARRAY_SIZE(xe_oa_set_property_funcs_config));
+
+ if (XE_IOCTL_DBG(oa->xe, ext.property >= ARRAY_SIZE(xe_oa_set_property_funcs_open)) ||
+- XE_IOCTL_DBG(oa->xe, ext.pad))
++ XE_IOCTL_DBG(oa->xe, !ext.property) || XE_IOCTL_DBG(oa->xe, ext.pad))
+ return -EINVAL;
+
+ idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_oa_set_property_funcs_open));
--- /dev/null
+From dcb171931954c51a1a7250d558f02b8f36570783 Mon Sep 17 00:00:00 2001
+From: Sanjay Yadav <sanjay.kumar.yadav@intel.com>
+Date: Tue, 18 Nov 2025 17:19:00 +0530
+Subject: drm/xe/oa: Fix potential UAF in xe_oa_add_config_ioctl()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Sanjay Yadav <sanjay.kumar.yadav@intel.com>
+
+commit dcb171931954c51a1a7250d558f02b8f36570783 upstream.
+
+In xe_oa_add_config_ioctl(), we accessed oa_config->id after dropping
+metrics_lock. Since this lock protects the lifetime of oa_config, an
+attacker could guess the id and call xe_oa_remove_config_ioctl() with
+perfect timing, freeing oa_config before we dereference it, leading to
+a potential use-after-free.
+
+Fix this by caching the id in a local variable while holding the lock.
+
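+The pattern, distilled: read everything needed from the object while
+the lock guarding its lifetime is held, and only use the local copies
+after unlocking (sketch of the fixed tail of the function):
+
+```c
+	id = oa_config->id;	/* snapshot while oa_config is alive */
+
+	drm_dbg(&oa->xe->drm, "Added config %s id=%i\n",
+		oa_config->uuid, id);
+
+	mutex_unlock(&oa->metrics_lock);
+
+	return id;	/* never dereference oa_config past the unlock */
+```
+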
+v2: (Matt A)
+- Dropped mutex_unlock(&oa->metrics_lock) ordering change from
+ xe_oa_remove_config_ioctl()
+
+Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/6614
+Fixes: cdf02fe1a94a7 ("drm/xe/oa/uapi: Add/remove OA config perf ops")
+Cc: <stable@vger.kernel.org> # v6.11+
+Suggested-by: Matthew Auld <matthew.auld@intel.com>
+Signed-off-by: Sanjay Yadav <sanjay.kumar.yadav@intel.com>
+Reviewed-by: Matthew Auld <matthew.auld@intel.com>
+Signed-off-by: Matthew Auld <matthew.auld@intel.com>
+Link: https://patch.msgid.link/20251118114859.3379952-2-sanjay.kumar.yadav@intel.com
+(cherry picked from commit 28aeaed130e8e587fd1b73b6d66ca41ccc5a1a31)
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_oa.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/xe/xe_oa.c
++++ b/drivers/gpu/drm/xe/xe_oa.c
+@@ -2378,11 +2378,13 @@ int xe_oa_add_config_ioctl(struct drm_de
+ goto sysfs_err;
+ }
+
+- mutex_unlock(&oa->metrics_lock);
++ id = oa_config->id;
++
++ drm_dbg(&oa->xe->drm, "Added config %s id=%i\n", oa_config->uuid, id);
+
+- drm_dbg(&oa->xe->drm, "Added config %s id=%i\n", oa_config->uuid, oa_config->id);
++ mutex_unlock(&oa->metrics_lock);
+
+- return oa_config->id;
++ return id;
+
+ sysfs_err:
+ mutex_unlock(&oa->metrics_lock);
--- /dev/null
+From 80f9c601d9c4d26f00356c0a9c461650e7089273 Mon Sep 17 00:00:00 2001
+From: Matthew Brost <matthew.brost@intel.com>
+Date: Fri, 12 Dec 2025 10:28:42 -0800
+Subject: drm/xe: Use usleep_range for accurate long-running workload timeslicing
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Matthew Brost <matthew.brost@intel.com>
+
+commit 80f9c601d9c4d26f00356c0a9c461650e7089273 upstream.
+
+msleep is not very accurate in terms of how long it actually sleeps,
+whereas usleep_range is precise. Replace the timeslice sleep for
+long-running workloads with the more accurate usleep_range to avoid
+jitter if the sleep period is less than 20ms.
+
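+For short delays the difference is substantial: msleep() is jiffy-based
+and is documented to often sleep noticeably longer than requested for
+periods under roughly 20ms, while usleep_range() is hrtimer-based and
+honours the requested window. For example:
+
+```c
+/* a ~5 ms pause with bounded slack, instead of msleep(5) */
+usleep_range(5000, 5500);
+```
+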
+Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
+Cc: stable@vger.kernel.org
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Link: https://patch.msgid.link/20251212182847.1683222-3-matthew.brost@intel.com
+(cherry picked from commit ca415c4d4c17ad676a2c8981e1fcc432221dce79)
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_guc_submit.c | 20 +++++++++++++++++++-
+ 1 file changed, 19 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/xe/xe_guc_submit.c
++++ b/drivers/gpu/drm/xe/xe_guc_submit.c
+@@ -578,6 +578,24 @@ static u32 wq_space_until_wrap(struct xe
+ return (WQ_SIZE - q->guc->wqi_tail);
+ }
+
++static inline void relaxed_ms_sleep(unsigned int delay_ms)
++{
++ unsigned long min_us, max_us;
++
++ if (!delay_ms)
++ return;
++
++ if (delay_ms > 20) {
++ msleep(delay_ms);
++ return;
++ }
++
++ min_us = mul_u32_u32(delay_ms, 1000);
++ max_us = min_us + 500;
++
++ usleep_range(min_us, max_us);
++}
++
+ static int wq_wait_for_space(struct xe_exec_queue *q, u32 wqi_size)
+ {
+ struct xe_guc *guc = exec_queue_to_guc(q);
+@@ -1356,7 +1374,7 @@ static void __guc_exec_queue_process_msg
+ since_resume_ms;
+
+ if (wait_ms > 0 && q->guc->resume_time)
+- msleep(wait_ms);
++ relaxed_ms_sleep(wait_ms);
+
+ set_exec_queue_suspended(q);
+ disable_scheduling(q, false);
--- /dev/null
+From 3925683515e93844be204381d2d5a1df5de34f31 Mon Sep 17 00:00:00 2001
+From: "Mario Limonciello (AMD)" <superm1@kernel.org>
+Date: Sat, 29 Nov 2025 19:46:31 -0600
+Subject: Revert "drm/amd: Skip power ungate during suspend for VPE"
+
+From: Mario Limonciello (AMD) <superm1@kernel.org>
+
+commit 3925683515e93844be204381d2d5a1df5de34f31 upstream.
+
+Skipping power ungate exposed some scenarios that will fail
+like below:
+
+```
+amdgpu: Register(0) [regVPEC_QUEUE_RESET_REQ] failed to reach value 0x00000000 != 0x00000001n
+amdgpu 0000:c1:00.0: amdgpu: VPE queue reset failed
+...
+amdgpu: [drm] *ERROR* wait_for_completion_timeout timeout!
+```
+
+The underlying s2idle issue that prompted the original commit is
+going to be fixed in the BIOS.
+This reverts commit 2a6c826cfeedd7714611ac115371a959ead55bda.
+
+Fixes: 2a6c826cfeed ("drm/amd: Skip power ungate during suspend for VPE")
+Cc: stable@vger.kernel.org
+Signed-off-by: Mario Limonciello (AMD) <superm1@kernel.org>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Reported-by: Konstantin <answer2019@yandex.ru>
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=220812
+Reported-by: Matthew Schwartz <matthew.schwartz@linux.dev>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3092,11 +3092,10 @@ int amdgpu_device_set_pg_state(struct am
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
+ continue;
+- /* skip CG for VCE/UVD/VPE, it's handled specially */
++ /* skip CG for VCE/UVD, it's handled specially */
+ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
+- adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VPE &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
+ adev->ip_blocks[i].version->funcs->set_powergating_state) {
+ /* enable powergating to save power */
net-usb-sr9700-fix-incorrect-command-used-to-write-single-register.patch
net-nfc-fix-deadlock-between-nfc_unregister_device-and-rfkill_fop_write.patch
net-macb-relocate-mog_init_rings-callback-from-macb_mac_link_up-to-macb_open.patch
+revert-drm-amd-skip-power-ungate-during-suspend-for-vpe.patch
+drm-amdgpu-gmc12-add-amdgpu_vm_handle_fault-handling.patch
+drm-amdgpu-add-missing-lock-to-amdgpu_ttm_access_memory_sdma.patch
+drm-amdgpu-gmc11-add-amdgpu_vm_handle_fault-handling.patch
+drm-msm-a6xx-fix-out-of-bound-io-access-in-a6xx_get_gmu_registers.patch
+drm-buddy-optimize-free-block-management-with-rb-tree.patch
+drm-buddy-separate-clear-and-dirty-free-block-trees.patch
+drm-gma500-remove-unused-helper-psb_fbdev_fb_setcolreg.patch
+drm-edid-add-drm_edid_ident_init-to-initialize-struct-drm_edid_ident.patch
+drm-xe-oa-fix-potential-uaf-in-xe_oa_add_config_ioctl.patch
+drm-mediatek-fix-device-node-reference-leak-in-mtk_dp_dt_parse.patch
+drm-mediatek-fix-probe-resource-leaks.patch
+drm-mediatek-fix-probe-memory-leak.patch
+drm-mediatek-fix-probe-device-leaks.patch
+drm-mediatek-ovl_adaptor-fix-probe-device-leaks.patch
+drm-amdkfd-export-the-cwsr_size-and-ctl_stack_size-to-userspace.patch
+drm-amdkfd-bump-minimum-vgpr-size-for-gfx1151.patch
+drm-amdkfd-trap-handler-support-for-expert-scheduling-mode.patch
+drm-i915-fix-format-string-truncation-warning.patch
+drm-ttm-avoid-null-pointer-deref-for-evicted-bos.patch
+drm-mgag200-fix-big-endian-support.patch
+drm-xe-bo-don-t-include-the-ccs-metadata-in-the-dma-buf-sg-table.patch
+drm-xe-oa-disallow-0-oa-property-values.patch
+drm-xe-adjust-long-running-workload-timeslices-to-reasonable-values.patch
+drm-xe-use-usleep_range-for-accurate-long-running-workload-timeslicing.patch
+drm-xe-drop-preempt-fences-when-destroying-imported-dma-bufs.patch
+drm-msm-dpu-add-missing-null-pointer-check-for-pingpong-interface.patch
+drm-i915-gem-zero-initialize-the-eb.vma-array-in-i915_gem_do_execbuffer.patch
+drm-nouveau-dispnv50-don-t-call-drm_atomic_get_crtc_state-in-prepare_fb.patch
+drm-imagination-disallow-exporting-of-pm-fw-protected-objects.patch