--- /dev/null
+From 93a01629c8bfd30906c76921ec986802d76920c6 Mon Sep 17 00:00:00 2001
+From: "Mario Limonciello (AMD)" <superm1@kernel.org>
+Date: Mon, 8 Dec 2025 22:46:46 -0600
+Subject: drm/amd: Fix unbind/rebind for VCN 4.0.5
+
+From: Mario Limonciello (AMD) <superm1@kernel.org>
+
+commit 93a01629c8bfd30906c76921ec986802d76920c6 upstream.
+
+Unbinding amdgpu works fine, but binding it again fails with an error
+that the sysfs file already exists, because the reset mask sysfs entry
+was never cleaned up on unbind. Add the missing cleanup step.
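+
+Illustratively, the reset_mask sysfs attribute registered at init time
+needs a matching teardown; a minimal sketch of the pairing, with the
+init side shown only for context (the fini call is what this patch
+adds):
+
+  /* sw_init side: creates the reset_mask sysfs file */
+  r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+
+  /* sw_fini side: the counterpart that was missing on unbind */
+  amdgpu_vcn_sysfs_reset_mask_fini(adev);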
+
+Fixes: 547aad32edac ("drm/amdgpu: add VCN4 ip block support")
+Signed-off-by: Mario Limonciello (AMD) <superm1@kernel.org>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit d717e62e9b6ccff0e3cec78a58dfbd00858448b3)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+@@ -265,6 +265,8 @@ static int vcn_v4_0_5_sw_fini(struct amd
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_free_mm_table(adev);
+
++ amdgpu_vcn_sysfs_reset_mask_fini(adev);
++
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(adev, i);
+ if (r)
--- /dev/null
+From 4fa944255be521b1bbd9780383f77206303a3a5c Mon Sep 17 00:00:00 2001
+From: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
+Date: Tue, 25 Nov 2025 10:48:39 +0100
+Subject: drm/amdgpu: add missing lock to amdgpu_ttm_access_memory_sdma
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
+
+commit 4fa944255be521b1bbd9780383f77206303a3a5c upstream.
+
+Users of ttm entities need to hold the gtt_window_lock before using them
+to guarantee proper ordering of jobs.
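+
+A minimal sketch of the required pattern, mirroring what this patch
+adds around the SDMA access path:
+
+  mutex_lock(&adev->mman.gtt_window_lock);
+  /* map src/dst through the GTT windows and build the copy job */
+  fence = amdgpu_job_submit(job);
+  mutex_unlock(&adev->mman.gtt_window_lock);
+  /* waiting on the fence can safely happen outside the lock */
+  dma_fence_wait_timeout(fence, false, adev->sdma_timeout);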
+
+Cc: stable@vger.kernel.org
+Fixes: cb5cc4f573e1 ("drm/amdgpu: improve debug VRAM access performance using sdma")
+Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -1529,6 +1529,7 @@ static int amdgpu_ttm_access_memory_sdma
+ if (r)
+ goto out;
+
++ mutex_lock(&adev->mman.gtt_window_lock);
+ amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
+ src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) +
+ src_mm.start;
+@@ -1543,6 +1544,7 @@ static int amdgpu_ttm_access_memory_sdma
+ WARN_ON(job->ibs[0].length_dw > num_dw);
+
+ fence = amdgpu_job_submit(job);
++ mutex_unlock(&adev->mman.gtt_window_lock);
+
+ if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
+ r = -ETIMEDOUT;
--- /dev/null
+From 8defb4f081a5feccc3ea8372d0c7af3522124e1f Mon Sep 17 00:00:00 2001
+From: Natalie Vock <natalie.vock@gmx.de>
+Date: Mon, 1 Dec 2025 12:52:38 -0500
+Subject: drm/amdgpu: Forward VMID reservation errors
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Natalie Vock <natalie.vock@gmx.de>
+
+commit 8defb4f081a5feccc3ea8372d0c7af3522124e1f upstream.
+
+Otherwise userspace may be fooled into believing it has a reserved VMID
+when in reality it doesn't, ultimately leading to GPU hangs when SPM is
+used.
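+
+With the error forwarded, a hypothetical userspace caller can finally
+observe the failure (illustrative only, not part of this change):
+
+  union drm_amdgpu_vm args = {
+          .in.op = AMDGPU_VM_OP_RESERVE_VMID,
+  };
+
+  if (drmIoctl(fd, DRM_IOCTL_AMDGPU_VM, &args))
+          return -1;  /* reservation failed: don't set up SPM */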
+
+Fixes: 80e709ee6ecc ("drm/amdgpu: add option params to enforce process isolation between graphics and compute")
+Cc: stable@vger.kernel.org
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Natalie Vock <natalie.vock@gmx.de>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -2910,8 +2910,7 @@ int amdgpu_vm_ioctl(struct drm_device *d
+ switch (args->in.op) {
+ case AMDGPU_VM_OP_RESERVE_VMID:
+ /* We only have requirement to reserve vmid from gfxhub */
+- amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB(0));
+- break;
++ return amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB(0));
+ case AMDGPU_VM_OP_UNRESERVE_VMID:
+ amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB(0));
+ break;
--- /dev/null
+From 3f2289b56cd98f5741056bdb6e521324eff07ce5 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 13 Nov 2025 15:55:19 -0500
+Subject: drm/amdgpu/gmc11: add amdgpu_vm_handle_fault() handling
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 3f2289b56cd98f5741056bdb6e521324eff07ce5 upstream.
+
+We need to call amdgpu_vm_handle_fault() on page faults
+on all gfx9 and newer parts to properly update the
+page tables, not just for recoverable page faults.
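+
+The retry-fault flow added below mirrors what the gfx9/gfx10 GMC
+handlers already do, in three ordered steps (condensed sketch of the
+hunk that follows):
+
+  /* 1. process only the first fault for this address */
+  if (entry->ih != &adev->irq.ih_soft &&
+      amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
+                               entry->timestamp))
+          return 1;
+  /* 2. bounce the IV from the HW ring to the soft IH ring */
+  if (entry->ih == &adev->irq.ih) {
+          amdgpu_irq_delegate(adev, entry, 8);
+          return 1;
+  }
+  /* 3. try to recover by filling the page tables */
+  if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr,
+                             entry->timestamp, write_fault))
+          return 1;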
+
+Cc: stable@vger.kernel.org
+Reviewed-by: Timur Kristóf <timur.kristof@gmail.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 27 +++++++++++++++++++++++++++
+ 1 file changed, 27 insertions(+)
+
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+@@ -103,12 +103,39 @@ static int gmc_v11_0_process_interrupt(s
+ uint32_t vmhub_index = entry->client_id == SOC21_IH_CLIENTID_VMC ?
+ AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0);
+ struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index];
++ bool retry_fault = !!(entry->src_data[1] & 0x80);
++ bool write_fault = !!(entry->src_data[1] & 0x20);
+ uint32_t status = 0;
+ u64 addr;
+
+ addr = (u64)entry->src_data[0] << 12;
+ addr |= ((u64)entry->src_data[1] & 0xf) << 44;
+
++ if (retry_fault) {
++ /* Returning 1 here also prevents sending the IV to the KFD */
++
++ /* Process it only if it's the first fault for this address */
++ if (entry->ih != &adev->irq.ih_soft &&
++ amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
++ entry->timestamp))
++ return 1;
++
++ /* Delegate it to a different ring if the hardware hasn't
++ * already done it.
++ */
++ if (entry->ih == &adev->irq.ih) {
++ amdgpu_irq_delegate(adev, entry, 8);
++ return 1;
++ }
++
++ /* Try to handle the recoverable page faults by filling page
++ * tables
++ */
++ if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr,
++ entry->timestamp, write_fault))
++ return 1;
++ }
++
+ if (!amdgpu_sriov_vf(adev)) {
+ /*
+ * Issue a dummy read to wait for the status register to
--- /dev/null
+From ff28ff98db6a8eeb469e02fb8bd1647b353232a9 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 13 Nov 2025 15:57:43 -0500
+Subject: drm/amdgpu/gmc12: add amdgpu_vm_handle_fault() handling
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit ff28ff98db6a8eeb469e02fb8bd1647b353232a9 upstream.
+
+We need to call amdgpu_vm_handle_fault() on page faults
+on all gfx9 and newer parts to properly update the
+page tables, not just for recoverable page faults.
+
+Cc: stable@vger.kernel.org
+Reviewed-by: Timur Kristóf <timur.kristof@gmail.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c | 27 +++++++++++++++++++++++++++
+ 1 file changed, 27 insertions(+)
+
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+@@ -91,6 +91,8 @@ static int gmc_v12_0_process_interrupt(s
+ struct amdgpu_iv_entry *entry)
+ {
+ struct amdgpu_vmhub *hub;
++ bool retry_fault = !!(entry->src_data[1] & 0x80);
++ bool write_fault = !!(entry->src_data[1] & 0x20);
+ uint32_t status = 0;
+ u64 addr;
+
+@@ -102,6 +104,31 @@ static int gmc_v12_0_process_interrupt(s
+ else
+ hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
+
++ if (retry_fault) {
++ /* Returning 1 here also prevents sending the IV to the KFD */
++
++ /* Process it only if it's the first fault for this address */
++ if (entry->ih != &adev->irq.ih_soft &&
++ amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
++ entry->timestamp))
++ return 1;
++
++ /* Delegate it to a different ring if the hardware hasn't
++ * already done it.
++ */
++ if (entry->ih == &adev->irq.ih) {
++ amdgpu_irq_delegate(adev, entry, 8);
++ return 1;
++ }
++
++ /* Try to handle the recoverable page faults by filling page
++ * tables
++ */
++ if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr,
++ entry->timestamp, write_fault))
++ return 1;
++ }
++
+ if (!amdgpu_sriov_vf(adev)) {
+ /*
+ * Issue a dummy read to wait for the status register to
--- /dev/null
+From c8e7e3c2215e286ebfe66fe828ed426546c519e6 Mon Sep 17 00:00:00 2001
+From: Srinivasan Shanmugam <srinivasan.shanmugam@amd.com>
+Date: Tue, 25 Nov 2025 21:20:45 +0530
+Subject: drm/amdgpu/sdma6: Update SDMA 6.0.3 FW version to include UMQ protected-fence fix
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Srinivasan Shanmugam <srinivasan.shanmugam@amd.com>
+
+commit c8e7e3c2215e286ebfe66fe828ed426546c519e6 upstream.
+
+On GFX11.0.3, earlier SDMA firmware versions issue the
+PROTECTED_FENCE write from the user VMID (e.g. VMID 8) instead of
+VMID 0. This causes a GPU VM protection fault when SDMA tries to
+write to the secure fence location, as seen in the UMQ SDMA test
+(cs-sdma-with-IP-DMA-UMQ).
+
+Fixes the below GPU page fault:
+[ 514.037189] amdgpu 0000:0b:00.0: amdgpu: [gfxhub] page fault (src_id:0 ring:40 vmid:8 pasid:32770)
+[ 514.037199] amdgpu 0000:0b:00.0: amdgpu: Process pid 0 thread pid 0
+[ 514.037205] amdgpu 0000:0b:00.0: amdgpu: in page starting at address 0x00007fff00409000 from client 10
+[ 514.037212] amdgpu 0000:0b:00.0: amdgpu: GCVM_L2_PROTECTION_FAULT_STATUS:0x00841A51
+[ 514.037217] amdgpu 0000:0b:00.0: amdgpu: Faulty UTCL2 client ID: SDMA0 (0xd)
+[ 514.037223] amdgpu 0000:0b:00.0: amdgpu: MORE_FAULTS: 0x1
+[ 514.037227] amdgpu 0000:0b:00.0: amdgpu: WALKER_ERROR: 0x0
+[ 514.037232] amdgpu 0000:0b:00.0: amdgpu: PERMISSION_FAULTS: 0x5
+[ 514.037236] amdgpu 0000:0b:00.0: amdgpu: MAPPING_ERROR: 0x0
+[ 514.037241] amdgpu 0000:0b:00.0: amdgpu: RW: 0x1
+
+v2: Updated commit message
+v3: s/gfx11.0.3/sdma 6.0.3/ in patch title (Alex)
+
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Cc: Christian König <christian.koenig@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Srinivasan Shanmugam <srinivasan.shanmugam@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+@@ -1389,7 +1389,7 @@ static int sdma_v6_0_sw_init(struct amdg
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ case IP_VERSION(6, 0, 3):
+- if ((adev->sdma.instance[0].fw_version >= 27) && !adev->sdma.disable_uq)
++ if (adev->sdma.instance[0].fw_version >= 29 && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ case IP_VERSION(6, 1, 0):
--- /dev/null
+From cf326449637a566ba98fb82c47d46cd479608c88 Mon Sep 17 00:00:00 2001
+From: Jonathan Kim <jonathan.kim@amd.com>
+Date: Fri, 5 Dec 2025 14:41:08 -0500
+Subject: drm/amdkfd: bump minimum vgpr size for gfx1151
+
+From: Jonathan Kim <jonathan.kim@amd.com>
+
+commit cf326449637a566ba98fb82c47d46cd479608c88 upstream.
+
+GFX1151 has 1.5x the number of available physical VGPRs per SIMD.
+Bump total memory availability for acquire checks on queue creation:
+assuming the 0x40000 per-CU baseline used as the default in this
+function, 1.5x gives exactly the 0x60000 bucket GFX1151 joins here.
+
+Signed-off-by: Jonathan Kim <jonathan.kim@amd.com>
+Reviewed-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit b42f3bf9536c9b710fd1d4deb7d1b0dc819dc72d)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_queue.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+@@ -409,6 +409,7 @@ static u32 kfd_get_vgpr_size_per_cu(u32
+ vgpr_size = 0x80000;
+ else if (gfxv == 110000 || /* GFX_VERSION_PLUM_BONITO */
+ gfxv == 110001 || /* GFX_VERSION_WHEAT_NAS */
++ gfxv == 110501 || /* GFX_VERSION_GFX1151 */
+ gfxv == 120000 || /* GFX_VERSION_GFX1200 */
+ gfxv == 120001) /* GFX_VERSION_GFX1201 */
+ vgpr_size = 0x60000;
--- /dev/null
+From 8fc2796dea6f1210e1a01573961d5836a7ce531e Mon Sep 17 00:00:00 2001
+From: Mario Limonciello <mario.limonciello@amd.com>
+Date: Fri, 5 Dec 2025 12:41:58 -0600
+Subject: drm/amdkfd: Export the cwsr_size and ctl_stack_size to userspace
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+commit 8fc2796dea6f1210e1a01573961d5836a7ce531e upstream.
+
+This is important for userspace to avoid hardcoding the VGPR size.
+
+Reviewed-by: Kent Russell <kent.russell@amd.com>
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 71776e0965f9f730af19c5f548827f2a7c91f5a8)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+@@ -491,6 +491,10 @@ static ssize_t node_show(struct kobject
+ dev->node_props.num_sdma_queues_per_engine);
+ sysfs_show_32bit_prop(buffer, offs, "num_cp_queues",
+ dev->node_props.num_cp_queues);
++ sysfs_show_32bit_prop(buffer, offs, "cwsr_size",
++ dev->node_props.cwsr_size);
++ sysfs_show_32bit_prop(buffer, offs, "ctl_stack_size",
++ dev->node_props.ctl_stack_size);
+
+ if (dev->gpu) {
+ log_max_watch_addr =
--- /dev/null
+From b7851f8c66191cd23a0a08bd484465ad74bbbb7d Mon Sep 17 00:00:00 2001
+From: Jay Cornwall <jay.cornwall@amd.com>
+Date: Fri, 14 Nov 2025 14:32:42 -0600
+Subject: drm/amdkfd: Trap handler support for expert scheduling mode
+
+From: Jay Cornwall <jay.cornwall@amd.com>
+
+commit b7851f8c66191cd23a0a08bd484465ad74bbbb7d upstream.
+
+The trap may be entered with dependency checking disabled.
+Wait for dependency counters and save/restore scheduling mode.
+
+v2:
+
+Use ttmp1 instead of ttmp11. ttmp11 is not zero-initialized.
+While the trap handler does zero this field before use, a user-mode
+second-level trap handler could not rely on this being zero when
+using an older kernel mode driver.
+
+v3:
+
+Use ttmp11 primarily but copy to ttmp1 before jumping to the
+second level trap handler. ttmp1 is inspectable by a debugger.
+Unexpected bits in the unused space may regress existing software.
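+
+For reference, the ttmp11/ttmp1 packing done below with
+s_andn2/s_lshl/s_or, expressed as C on u32 values (constants as
+defined in the asm):
+
+  #define TTMP11_SCHED_MODE_SHIFT 26
+  #define TTMP11_SCHED_MODE_MASK  0xC000000
+
+  /* on trap entry: save SCHED_MODE[1:0] into ttmp11[27:26] */
+  ttmp11 = (ttmp11 & ~TTMP11_SCHED_MODE_MASK) |
+           (sched_mode << TTMP11_SCHED_MODE_SHIFT);
+
+  /* before jumping to the second-level trap handler: mirror the
+   * field into the unused bits of ttmp1[27:26] (return PC_HI) */
+  ttmp1 = (ttmp1 & ~TTMP11_SCHED_MODE_MASK) |
+          (ttmp11 & TTMP11_SCHED_MODE_MASK);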
+
+Signed-off-by: Jay Cornwall <jay.cornwall@amd.com>
+Reviewed-by: Lancelot Six <lancelot.six@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 423888879412e94725ca2bdccd89414887d98e31)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 62 +++++++++--------
+ drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm | 37 ++++++++++
+ 2 files changed, 73 insertions(+), 26 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+@@ -3644,14 +3644,18 @@ static const uint32_t cwsr_trap_gfx9_4_3
+ };
+
+ static const uint32_t cwsr_trap_gfx12_hex[] = {
+- 0xbfa00001, 0xbfa002a2,
+- 0xb0804009, 0xb8f8f804,
++ 0xbfa00001, 0xbfa002b2,
++ 0xb0804009, 0xb8eef81a,
++ 0xbf880000, 0xb980081a,
++ 0x00000000, 0xb8f8f804,
++ 0x9177ff77, 0x0c000000,
++ 0x846e9a6e, 0x8c776e77,
+ 0x9178ff78, 0x00008c00,
+ 0xb8fbf811, 0x8b6eff78,
+ 0x00004000, 0xbfa10008,
+ 0x8b6eff7b, 0x00000080,
+ 0xbfa20018, 0x8b6ea07b,
+- 0xbfa20042, 0xbf830010,
++ 0xbfa2004a, 0xbf830010,
+ 0xb8fbf811, 0xbfa0fffb,
+ 0x8b6eff7b, 0x00000bd0,
+ 0xbfa20010, 0xb8eef812,
+@@ -3662,28 +3666,32 @@ static const uint32_t cwsr_trap_gfx12_he
+ 0xf0000000, 0xbfa20005,
+ 0x8b6fff6f, 0x00000200,
+ 0xbfa20002, 0x8b6ea07b,
+- 0xbfa2002c, 0xbefa4d82,
++ 0xbfa20034, 0xbefa4d82,
+ 0xbf8a0000, 0x84fa887a,
+ 0xbf0d8f7b, 0xbfa10002,
+ 0x8c7bff7b, 0xffff0000,
+- 0xf4601bbd, 0xf8000010,
+- 0xbf8a0000, 0x846e976e,
+- 0x9177ff77, 0x00800000,
+- 0x8c776e77, 0xf4603bbd,
+- 0xf8000000, 0xbf8a0000,
+- 0xf4603ebd, 0xf8000008,
+- 0xbf8a0000, 0x8bee6e6e,
+- 0xbfa10001, 0xbe80486e,
+- 0x8b6eff6d, 0xf0000000,
+- 0xbfa20009, 0xb8eef811,
+- 0x8b6eff6e, 0x00000080,
+- 0xbfa20007, 0x8c78ff78,
+- 0x00004000, 0x80ec886c,
+- 0x82ed806d, 0xbfa00002,
+- 0x806c846c, 0x826d806d,
+- 0x8b6dff6d, 0x0000ffff,
+- 0x8bfe7e7e, 0x8bea6a6a,
+- 0x85788978, 0xb9783244,
++ 0x8b6eff77, 0x0c000000,
++ 0x916dff6d, 0x0c000000,
++ 0x8c6d6e6d, 0xf4601bbd,
++ 0xf8000010, 0xbf8a0000,
++ 0x846e976e, 0x9177ff77,
++ 0x00800000, 0x8c776e77,
++ 0xf4603bbd, 0xf8000000,
++ 0xbf8a0000, 0xf4603ebd,
++ 0xf8000008, 0xbf8a0000,
++ 0x8bee6e6e, 0xbfa10001,
++ 0xbe80486e, 0x8b6eff6d,
++ 0xf0000000, 0xbfa20009,
++ 0xb8eef811, 0x8b6eff6e,
++ 0x00000080, 0xbfa20007,
++ 0x8c78ff78, 0x00004000,
++ 0x80ec886c, 0x82ed806d,
++ 0xbfa00002, 0x806c846c,
++ 0x826d806d, 0x8b6dff6d,
++ 0x0000ffff, 0x8bfe7e7e,
++ 0x8bea6a6a, 0x85788978,
++ 0x936eff77, 0x0002001a,
++ 0xb96ef81a, 0xb9783244,
+ 0xbe804a6c, 0xb8faf802,
+ 0xbf0d987a, 0xbfa10001,
+ 0xbfb00000, 0x8b6dff6d,
+@@ -3981,7 +3989,7 @@ static const uint32_t cwsr_trap_gfx12_he
+ 0x008ce800, 0x00000000,
+ 0x807d817d, 0x8070ff70,
+ 0x00000080, 0xbf0a7b7d,
+- 0xbfa2fff7, 0xbfa0016e,
++ 0xbfa2fff7, 0xbfa00171,
+ 0xbef4007e, 0x8b75ff7f,
+ 0x0000ffff, 0x8c75ff75,
+ 0x00040000, 0xbef60080,
+@@ -4163,12 +4171,14 @@ static const uint32_t cwsr_trap_gfx12_he
+ 0xf8000074, 0xbf8a0000,
+ 0x8b6dff6d, 0x0000ffff,
+ 0x8bfe7e7e, 0x8bea6a6a,
+- 0xb97af804, 0xbe804ec2,
+- 0xbf94fffe, 0xbe804a6c,
++ 0x936eff77, 0x0002001a,
++ 0xb96ef81a, 0xb97af804,
+ 0xbe804ec2, 0xbf94fffe,
+- 0xbfb10000, 0xbf9f0000,
++ 0xbe804a6c, 0xbe804ec2,
++ 0xbf94fffe, 0xbfb10000,
+ 0xbf9f0000, 0xbf9f0000,
+ 0xbf9f0000, 0xbf9f0000,
++ 0xbf9f0000, 0x00000000,
+ };
+
+ static const uint32_t cwsr_trap_gfx9_5_0_hex[] = {
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
+@@ -78,9 +78,16 @@ var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_
+ var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SIZE = SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT - SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT
+ var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT
+ var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SIZE = 32 - SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT
++
++var SQ_WAVE_SCHED_MODE_DEP_MODE_SHIFT = 0
++var SQ_WAVE_SCHED_MODE_DEP_MODE_SIZE = 2
++
+ var BARRIER_STATE_SIGNAL_OFFSET = 16
+ var BARRIER_STATE_VALID_OFFSET = 0
+
++var TTMP11_SCHED_MODE_SHIFT = 26
++var TTMP11_SCHED_MODE_SIZE = 2
++var TTMP11_SCHED_MODE_MASK = 0xC000000
+ var TTMP11_DEBUG_TRAP_ENABLED_SHIFT = 23
+ var TTMP11_DEBUG_TRAP_ENABLED_MASK = 0x800000
+
+@@ -160,8 +167,19 @@ L_JUMP_TO_RESTORE:
+ s_branch L_RESTORE
+
+ L_SKIP_RESTORE:
++ // Assume most relaxed scheduling mode is set. Save and revert to normal mode.
++ s_getreg_b32 ttmp2, hwreg(HW_REG_WAVE_SCHED_MODE)
++ s_wait_alu 0
++ s_setreg_imm32_b32 hwreg(HW_REG_WAVE_SCHED_MODE, \
++ SQ_WAVE_SCHED_MODE_DEP_MODE_SHIFT, SQ_WAVE_SCHED_MODE_DEP_MODE_SIZE), 0
++
+ s_getreg_b32 s_save_state_priv, hwreg(HW_REG_WAVE_STATE_PRIV) //save STATUS since we will change SCC
+
++ // Save SCHED_MODE[1:0] into ttmp11[27:26].
++ s_andn2_b32 ttmp11, ttmp11, TTMP11_SCHED_MODE_MASK
++ s_lshl_b32 ttmp2, ttmp2, TTMP11_SCHED_MODE_SHIFT
++ s_or_b32 ttmp11, ttmp11, ttmp2
++
+ // Clear SPI_PRIO: do not save with elevated priority.
+ // Clear ECC_ERR: prevents SQC store and triggers FATAL_HALT if setreg'd.
+ s_andn2_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_ALWAYS_CLEAR_MASK
+@@ -238,6 +256,13 @@ L_FETCH_2ND_TRAP:
+ s_cbranch_scc0 L_NO_SIGN_EXTEND_TMA
+ s_or_b32 ttmp15, ttmp15, 0xFFFF0000
+ L_NO_SIGN_EXTEND_TMA:
++#if ASIC_FAMILY == CHIP_GFX12
++ // Move SCHED_MODE[1:0] from ttmp11 to unused bits in ttmp1[27:26] (return PC_HI).
++ // The second-level trap will restore from ttmp1 for backwards compatibility.
++ s_and_b32 ttmp2, ttmp11, TTMP11_SCHED_MODE_MASK
++ s_andn2_b32 ttmp1, ttmp1, TTMP11_SCHED_MODE_MASK
++ s_or_b32 ttmp1, ttmp1, ttmp2
++#endif
+
+ s_load_dword ttmp2, [ttmp14, ttmp15], 0x10 scope:SCOPE_SYS // debug trap enabled flag
+ s_wait_idle
+@@ -287,6 +312,10 @@ L_EXIT_TRAP:
+ // STATE_PRIV.BARRIER_COMPLETE may have changed since we read it.
+ // Only restore fields which the trap handler changes.
+ s_lshr_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_SCC_SHIFT
++
++ // Assume relaxed scheduling mode after this point.
++ restore_sched_mode(ttmp2)
++
+ s_setreg_b32 hwreg(HW_REG_WAVE_STATE_PRIV, SQ_WAVE_STATE_PRIV_SCC_SHIFT, \
+ SQ_WAVE_STATE_PRIV_POISON_ERR_SHIFT - SQ_WAVE_STATE_PRIV_SCC_SHIFT + 1), s_save_state_priv
+
+@@ -1043,6 +1072,9 @@ L_SKIP_BARRIER_RESTORE:
+ s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
+ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+
++ // Assume relaxed scheduling mode after this point.
++ restore_sched_mode(s_restore_tmp)
++
+ s_setreg_b32 hwreg(HW_REG_WAVE_STATE_PRIV), s_restore_state_priv // SCC is included, which is changed by previous salu
+
+ // Make barrier and LDS state visible to all waves in the group.
+@@ -1134,3 +1166,8 @@ function valu_sgpr_hazard
+ end
+ #endif
+ end
++
++function restore_sched_mode(s_tmp)
++ s_bfe_u32 s_tmp, ttmp11, (TTMP11_SCHED_MODE_SHIFT | (TTMP11_SCHED_MODE_SIZE << 0x10))
++ s_setreg_b32 hwreg(HW_REG_WAVE_SCHED_MODE), s_tmp
++end
--- /dev/null
+From 35e282c1868de3c9d15f9a8812cbb2e7da06b0c1 Mon Sep 17 00:00:00 2001
+From: Luca Ceresoli <luca.ceresoli@bootlin.com>
+Date: Thu, 27 Nov 2025 09:42:40 +0100
+Subject: drm/bridge: ti-sn65dsi83: ignore PLL_UNLOCK errors
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Luca Ceresoli <luca.ceresoli@bootlin.com>
+
+commit 35e282c1868de3c9d15f9a8812cbb2e7da06b0c1 upstream.
+
+On hardware based on Toradex Verdin AM62 the recovery mechanism added by
+commit ad5c6ecef27e ("drm: bridge: ti-sn65dsi83: Add error recovery
+mechanism") has been reported [0] to make the display turn on and off,
+with the kernel logging "Unexpected link status 0x01".
+
+According to the report, the error recovery mechanism is triggered by the
+PLL_UNLOCK error going active. Analysis suggested the board is unable to
+provide the correct DSI clock needed by the SN65DSI84, to which the TI
+SN65DSI84 reacts by raising the PLL_UNLOCK error, while the display still
+works apparently without issues.
+
+On other hardware, where all the clocks are within the components
+specifications, the PLL_UNLOCK bit does not trigger while the display is in
+normal use. It can trigger for e.g. electromagnetic interference, which is
+a transient event and exactly the reason why the error recovery mechanism
+has been implemented.
+
+Ideally the PLL_UNLOCK bit would be ignored only when working out of
+specification, but this requires detecting in software whether it triggers
+because the device is working out of specification while still displaying
+correctly for the user, or for a genuine reason (e.g. EMI, or working out
+of specification in a way that does compromise the visual output).
+
+The ongoing analysis as of this writing [1][2] has not yet found a way for
+the driver to discriminate between the two cases. So as a temporary
+measure, mask the PLL_UNLOCK error bit unconditionally.
+
+[0] https://lore.kernel.org/r/bhkn6hley4xrol5o3ytn343h4unkwsr26p6s6ltcwexnrsjsdx@mgkdf6ztow42
+[1] https://lore.kernel.org/all/b71e941c-fc8a-4ac1-9407-0fe7df73b412@gmail.com/
+[2] https://lore.kernel.org/all/20251125103900.31750-1-francesco@dolcini.it/
+
+Fixes: ad5c6ecef27e ("drm: bridge: ti-sn65dsi83: Add error recovery mechanism")
+Closes: https://lore.kernel.org/r/bhkn6hley4xrol5o3ytn343h4unkwsr26p6s6ltcwexnrsjsdx@mgkdf6ztow42
+Cc: stable@vger.kernel.org # 6.15+
+Reported-by: João Paulo Gonçalves <joao.goncalves@toradex.com>
+Tested-by: Emanuele Ghidoli <emanuele.ghidoli@toradex.com>
+Co-developed-by: Hervé Codina <herve.codina@bootlin.com>
+Signed-off-by: Hervé Codina <herve.codina@bootlin.com>
+Signed-off-by: Luca Ceresoli <luca.ceresoli@bootlin.com>
+Link: https://patch.msgid.link/20251127-drm-ti-sn65dsi83-ignore-pll-unlock-v1-1-8a03fdf562e9@bootlin.com
+Signed-off-by: Maxime Ripard <mripard@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/bridge/ti-sn65dsi83.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
++++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+@@ -429,7 +429,14 @@ static void sn65dsi83_handle_errors(stru
+ */
+
+ ret = regmap_read(ctx->regmap, REG_IRQ_STAT, &irq_stat);
+- if (ret || irq_stat) {
++
++ /*
++ * Some hardware (Toradex Verdin AM62) is known to report the
++ * PLL_UNLOCK error interrupt while working without visible
++ * problems. In lack of a reliable way to discriminate such cases
++ * from user-visible PLL_UNLOCK cases, ignore that bit entirely.
++ */
++ if (ret || irq_stat & ~REG_IRQ_STAT_CHA_PLL_UNLOCK) {
+ /*
+ * IRQ acknowledged is not always possible (the bridge can be in
+ * a state where it doesn't answer anymore). To prevent an
+@@ -654,7 +661,7 @@ static void sn65dsi83_atomic_enable(stru
+ if (ctx->irq) {
+ /* Enable irq to detect errors */
+ regmap_write(ctx->regmap, REG_IRQ_GLOBAL, REG_IRQ_GLOBAL_IRQ_EN);
+- regmap_write(ctx->regmap, REG_IRQ_EN, 0xff);
++ regmap_write(ctx->regmap, REG_IRQ_EN, 0xff & ~REG_IRQ_EN_CHA_PLL_UNLOCK_EN);
+ } else {
+ /* Use the polling task */
+ sn65dsi83_monitor_start(ctx);
--- /dev/null
+From c178e534fff1d5a74da80ea03b20e2b948a00113 Mon Sep 17 00:00:00 2001
+From: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
+Date: Mon, 6 Oct 2025 15:21:22 +0530
+Subject: drm/buddy: Optimize free block management with RB tree
+
+From: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
+
+commit c178e534fff1d5a74da80ea03b20e2b948a00113 upstream.
+
+Replace the freelist (O(n)) used for free block management with a
+red-black tree, providing more efficient O(log n) search, insert,
+and delete operations. This improves scalability and performance
+when managing large numbers of free blocks per order (e.g., hundreds
+or thousands).
+
+In the VK-CTS memory stress subtest, the buddy manager merges
+fragmented memory and inserts freed blocks into the freelist. Since
+freelist insertion is O(n), this becomes a bottleneck as fragmentation
+increases. Benchmarking shows list_insert_sorted() consumes ~52.69% CPU
+with the freelist, compared to just 0.03% with the RB tree
+(rbtree_insert.isra.0), despite performing the same sorted insert.
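+
+The sorted insert reduces to the kernel's rb_add() helper plus a
+comparator (sketch; node_offset() stands in for the driver's
+drm_buddy_block_offset()-based compare):
+
+  static bool less(struct rb_node *a, const struct rb_node *b)
+  {
+          return node_offset(a) < node_offset(b);
+  }
+
+  rb_add(&block->rb, &mm->free_tree[order], less);   /* O(log n) insert */
+  rb_erase(&block->rb, &mm->free_tree[order]);       /* O(log n) removal */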
+
+This also improves performance in heavily fragmented workloads,
+such as games or graphics tests that stress memory.
+
+As the buddy allocator evolves with new features such as clear-page
+tracking, the resulting fragmentation and complexity have grown.
+These RB-tree based design changes are introduced to address that
+growth and ensure the allocator continues to perform efficiently
+under fragmented conditions.
+
+The RB tree implementation with separate clear/dirty trees provides:
+- O(n log n) aggregate complexity for all operations instead of O(n^2)
+- Elimination of soft lockups and system instability
+- Improved code maintainability and clarity
+- Better scalability for large memory systems
+- Predictable performance under fragmentation
+
+v3(Matthew):
+ - Remove RB_EMPTY_NODE check in force_merge function.
+ - Rename rb for loop macros to have less generic names and move to
+ .c file.
+ - Make the rb node rb and link field as union.
+
+v4(Jani Nikula):
+ - The kernel-doc comment should be "/**"
+ - Move all the rbtree macros to rbtree.h and add parens to ensure
+ correct precedence.
+
+v5:
+ - Remove the inline in a .c file (Jani Nikula).
+
+v6(Peter Zijlstra):
+ - Add rb_add() function replacing the existing rbtree_insert() code.
+
+v7:
+ - A full walk iteration in rbtree is slower than the list (Peter Zijlstra).
+ - The existing rbtree_postorder_for_each_entry_safe macro should be used
+ in scenarios where traversal order is not a critical factor (Christian).
+
+v8(Matthew):
+ - Remove the rbtree_is_empty() check in this patch as well.
+
+Cc: stable@vger.kernel.org
+Fixes: a68c7eaa7a8f ("drm/amdgpu: Enable clear page functionality")
+Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
+Reviewed-by: Matthew Auld <matthew.auld@intel.com>
+Link: https://lore.kernel.org/r/20251006095124.1663-1-Arunpravin.PaneerSelvam@amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/drm_buddy.c | 195 ++++++++++++++++++++++++++------------------
+ include/drm/drm_buddy.h | 11 +-
+ 2 files changed, 126 insertions(+), 80 deletions(-)
+
+--- a/drivers/gpu/drm/drm_buddy.c
++++ b/drivers/gpu/drm/drm_buddy.c
+@@ -14,6 +14,8 @@
+
+ static struct kmem_cache *slab_blocks;
+
++#define rbtree_get_free_block(node) rb_entry((node), struct drm_buddy_block, rb)
++
+ static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
+ struct drm_buddy_block *parent,
+ unsigned int order,
+@@ -31,6 +33,8 @@ static struct drm_buddy_block *drm_block
+ block->header |= order;
+ block->parent = parent;
+
++ RB_CLEAR_NODE(&block->rb);
++
+ BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED);
+ return block;
+ }
+@@ -41,23 +45,49 @@ static void drm_block_free(struct drm_bu
+ kmem_cache_free(slab_blocks, block);
+ }
+
+-static void list_insert_sorted(struct drm_buddy *mm,
+- struct drm_buddy_block *block)
++static bool drm_buddy_block_offset_less(const struct drm_buddy_block *block,
++ const struct drm_buddy_block *node)
+ {
+- struct drm_buddy_block *node;
+- struct list_head *head;
++ return drm_buddy_block_offset(block) < drm_buddy_block_offset(node);
++}
+
+- head = &mm->free_list[drm_buddy_block_order(block)];
+- if (list_empty(head)) {
+- list_add(&block->link, head);
+- return;
+- }
++static bool rbtree_block_offset_less(struct rb_node *block,
++ const struct rb_node *node)
++{
++ return drm_buddy_block_offset_less(rbtree_get_free_block(block),
++ rbtree_get_free_block(node));
++}
+
+- list_for_each_entry(node, head, link)
+- if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node))
+- break;
++static void rbtree_insert(struct drm_buddy *mm,
++ struct drm_buddy_block *block)
++{
++ rb_add(&block->rb,
++ &mm->free_tree[drm_buddy_block_order(block)],
++ rbtree_block_offset_less);
++}
++
++static void rbtree_remove(struct drm_buddy *mm,
++ struct drm_buddy_block *block)
++{
++ struct rb_root *root;
++
++ root = &mm->free_tree[drm_buddy_block_order(block)];
++ rb_erase(&block->rb, root);
++
++ RB_CLEAR_NODE(&block->rb);
++}
++
++static struct drm_buddy_block *
++rbtree_last_entry(struct drm_buddy *mm, unsigned int order)
++{
++ struct rb_node *node = rb_last(&mm->free_tree[order]);
++
++ return node ? rb_entry(node, struct drm_buddy_block, rb) : NULL;
++}
+
+- __list_add(&block->link, node->link.prev, &node->link);
++static bool rbtree_is_empty(struct drm_buddy *mm, unsigned int order)
++{
++ return RB_EMPTY_ROOT(&mm->free_tree[order]);
+ }
+
+ static void clear_reset(struct drm_buddy_block *block)
+@@ -70,12 +100,13 @@ static void mark_cleared(struct drm_budd
+ block->header |= DRM_BUDDY_HEADER_CLEAR;
+ }
+
+-static void mark_allocated(struct drm_buddy_block *block)
++static void mark_allocated(struct drm_buddy *mm,
++ struct drm_buddy_block *block)
+ {
+ block->header &= ~DRM_BUDDY_HEADER_STATE;
+ block->header |= DRM_BUDDY_ALLOCATED;
+
+- list_del(&block->link);
++ rbtree_remove(mm, block);
+ }
+
+ static void mark_free(struct drm_buddy *mm,
+@@ -84,15 +115,16 @@ static void mark_free(struct drm_buddy *
+ block->header &= ~DRM_BUDDY_HEADER_STATE;
+ block->header |= DRM_BUDDY_FREE;
+
+- list_insert_sorted(mm, block);
++ rbtree_insert(mm, block);
+ }
+
+-static void mark_split(struct drm_buddy_block *block)
++static void mark_split(struct drm_buddy *mm,
++ struct drm_buddy_block *block)
+ {
+ block->header &= ~DRM_BUDDY_HEADER_STATE;
+ block->header |= DRM_BUDDY_SPLIT;
+
+- list_del(&block->link);
++ rbtree_remove(mm, block);
+ }
+
+ static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
+@@ -148,7 +180,7 @@ static unsigned int __drm_buddy_free(str
+ mark_cleared(parent);
+ }
+
+- list_del(&buddy->link);
++ rbtree_remove(mm, buddy);
+ if (force_merge && drm_buddy_block_is_clear(buddy))
+ mm->clear_avail -= drm_buddy_block_size(mm, buddy);
+
+@@ -179,13 +211,19 @@ static int __force_merge(struct drm_budd
+ return -EINVAL;
+
+ for (i = min_order - 1; i >= 0; i--) {
+- struct drm_buddy_block *block, *prev;
++ struct rb_root *root = &mm->free_tree[i];
++ struct rb_node *iter;
++
++ iter = rb_last(root);
+
+- list_for_each_entry_safe_reverse(block, prev, &mm->free_list[i], link) {
+- struct drm_buddy_block *buddy;
++ while (iter) {
++ struct drm_buddy_block *block, *buddy;
+ u64 block_start, block_end;
+
+- if (!block->parent)
++ block = rbtree_get_free_block(iter);
++ iter = rb_prev(iter);
++
++ if (!block || !block->parent)
+ continue;
+
+ block_start = drm_buddy_block_offset(block);
+@@ -201,15 +239,10 @@ static int __force_merge(struct drm_budd
+ WARN_ON(drm_buddy_block_is_clear(block) ==
+ drm_buddy_block_is_clear(buddy));
+
+- /*
+- * If the prev block is same as buddy, don't access the
+- * block in the next iteration as we would free the
+- * buddy block as part of the free function.
+- */
+- if (prev == buddy)
+- prev = list_prev_entry(prev, link);
++ if (iter == &buddy->rb)
++ iter = rb_prev(iter);
+
+- list_del(&block->link);
++ rbtree_remove(mm, block);
+ if (drm_buddy_block_is_clear(block))
+ mm->clear_avail -= drm_buddy_block_size(mm, block);
+
+@@ -237,7 +270,7 @@ static int __force_merge(struct drm_budd
+ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
+ {
+ unsigned int i;
+- u64 offset;
++ u64 offset = 0;
+
+ if (size < chunk_size)
+ return -EINVAL;
+@@ -258,14 +291,14 @@ int drm_buddy_init(struct drm_buddy *mm,
+
+ BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER);
+
+- mm->free_list = kmalloc_array(mm->max_order + 1,
+- sizeof(struct list_head),
++ mm->free_tree = kmalloc_array(mm->max_order + 1,
++ sizeof(struct rb_root),
+ GFP_KERNEL);
+- if (!mm->free_list)
++ if (!mm->free_tree)
+ return -ENOMEM;
+
+ for (i = 0; i <= mm->max_order; ++i)
+- INIT_LIST_HEAD(&mm->free_list[i]);
++ mm->free_tree[i] = RB_ROOT;
+
+ mm->n_roots = hweight64(size);
+
+@@ -273,9 +306,8 @@ int drm_buddy_init(struct drm_buddy *mm,
+ sizeof(struct drm_buddy_block *),
+ GFP_KERNEL);
+ if (!mm->roots)
+- goto out_free_list;
++ goto out_free_tree;
+
+- offset = 0;
+ i = 0;
+
+ /*
+@@ -312,8 +344,8 @@ out_free_roots:
+ while (i--)
+ drm_block_free(mm, mm->roots[i]);
+ kfree(mm->roots);
+-out_free_list:
+- kfree(mm->free_list);
++out_free_tree:
++ kfree(mm->free_tree);
+ return -ENOMEM;
+ }
+ EXPORT_SYMBOL(drm_buddy_init);
+@@ -323,7 +355,7 @@ EXPORT_SYMBOL(drm_buddy_init);
+ *
+ * @mm: DRM buddy manager to free
+ *
+- * Cleanup memory manager resources and the freelist
++ * Cleanup memory manager resources and the freetree
+ */
+ void drm_buddy_fini(struct drm_buddy *mm)
+ {
+@@ -350,7 +382,7 @@ void drm_buddy_fini(struct drm_buddy *mm
+ WARN_ON(mm->avail != mm->size);
+
+ kfree(mm->roots);
+- kfree(mm->free_list);
++ kfree(mm->free_tree);
+ }
+ EXPORT_SYMBOL(drm_buddy_fini);
+
+@@ -383,7 +415,7 @@ static int split_block(struct drm_buddy
+ clear_reset(block);
+ }
+
+- mark_split(block);
++ mark_split(mm, block);
+
+ return 0;
+ }
+@@ -412,7 +444,7 @@ EXPORT_SYMBOL(drm_get_buddy);
+ * @is_clear: blocks clear state
+ *
+ * Reset the clear state based on @is_clear value for each block
+- * in the freelist.
++ * in the freetree.
+ */
+ void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear)
+ {
+@@ -431,9 +463,9 @@ void drm_buddy_reset_clear(struct drm_bu
+ }
+
+ for (i = 0; i <= mm->max_order; ++i) {
+- struct drm_buddy_block *block;
++ struct drm_buddy_block *block, *tmp;
+
+- list_for_each_entry_reverse(block, &mm->free_list[i], link) {
++ rbtree_postorder_for_each_entry_safe(block, tmp, &mm->free_tree[i], rb) {
+ if (is_clear != drm_buddy_block_is_clear(block)) {
+ if (is_clear) {
+ mark_cleared(block);
+@@ -639,14 +671,18 @@ get_maxblock(struct drm_buddy *mm, unsig
+ unsigned int i;
+
+ for (i = order; i <= mm->max_order; ++i) {
++ struct rb_node *iter = rb_last(&mm->free_tree[i]);
+ struct drm_buddy_block *tmp_block;
+
+- list_for_each_entry_reverse(tmp_block, &mm->free_list[i], link) {
+- if (block_incompatible(tmp_block, flags))
+- continue;
++ while (iter) {
++ tmp_block = rbtree_get_free_block(iter);
+
+- block = tmp_block;
+- break;
++ if (!block_incompatible(tmp_block, flags)) {
++ block = tmp_block;
++ break;
++ }
++
++ iter = rb_prev(iter);
+ }
+
+ if (!block)
+@@ -667,7 +703,7 @@ get_maxblock(struct drm_buddy *mm, unsig
+ }
+
+ static struct drm_buddy_block *
+-alloc_from_freelist(struct drm_buddy *mm,
++alloc_from_freetree(struct drm_buddy *mm,
+ unsigned int order,
+ unsigned long flags)
+ {
+@@ -682,14 +718,18 @@ alloc_from_freelist(struct drm_buddy *mm
+ tmp = drm_buddy_block_order(block);
+ } else {
+ for (tmp = order; tmp <= mm->max_order; ++tmp) {
++ struct rb_node *iter = rb_last(&mm->free_tree[tmp]);
+ struct drm_buddy_block *tmp_block;
+
+- list_for_each_entry_reverse(tmp_block, &mm->free_list[tmp], link) {
+- if (block_incompatible(tmp_block, flags))
+- continue;
++ while (iter) {
++ tmp_block = rbtree_get_free_block(iter);
+
+- block = tmp_block;
+- break;
++ if (!block_incompatible(tmp_block, flags)) {
++ block = tmp_block;
++ break;
++ }
++
++ iter = rb_prev(iter);
+ }
+
+ if (block)
+@@ -700,13 +740,9 @@ alloc_from_freelist(struct drm_buddy *mm
+ if (!block) {
+ /* Fallback method */
+ for (tmp = order; tmp <= mm->max_order; ++tmp) {
+- if (!list_empty(&mm->free_list[tmp])) {
+- block = list_last_entry(&mm->free_list[tmp],
+- struct drm_buddy_block,
+- link);
+- if (block)
+- break;
+- }
++ block = rbtree_last_entry(mm, tmp);
++ if (block)
++ break;
+ }
+
+ if (!block)
+@@ -771,7 +807,7 @@ static int __alloc_range(struct drm_budd
+
+ if (contains(start, end, block_start, block_end)) {
+ if (drm_buddy_block_is_free(block)) {
+- mark_allocated(block);
++ mark_allocated(mm, block);
+ total_allocated += drm_buddy_block_size(mm, block);
+ mm->avail -= drm_buddy_block_size(mm, block);
+ if (drm_buddy_block_is_clear(block))
+@@ -849,8 +885,8 @@ static int __alloc_contig_try_harder(str
+ {
+ u64 rhs_offset, lhs_offset, lhs_size, filled;
+ struct drm_buddy_block *block;
+- struct list_head *list;
+ LIST_HEAD(blocks_lhs);
++ struct rb_node *iter;
+ unsigned long pages;
+ unsigned int order;
+ u64 modify_size;
+@@ -862,11 +898,14 @@ static int __alloc_contig_try_harder(str
+ if (order == 0)
+ return -ENOSPC;
+
+- list = &mm->free_list[order];
+- if (list_empty(list))
++ if (rbtree_is_empty(mm, order))
+ return -ENOSPC;
+
+- list_for_each_entry_reverse(block, list, link) {
++ iter = rb_last(&mm->free_tree[order]);
++
++ while (iter) {
++ block = rbtree_get_free_block(iter);
++
+ /* Allocate blocks traversing RHS */
+ rhs_offset = drm_buddy_block_offset(block);
+ err = __drm_buddy_alloc_range(mm, rhs_offset, size,
+@@ -891,6 +930,8 @@ static int __alloc_contig_try_harder(str
+ }
+ /* Free blocks for the next iteration */
+ drm_buddy_free_list_internal(mm, blocks);
++
++ iter = rb_prev(iter);
+ }
+
+ return -ENOSPC;
+@@ -976,7 +1017,7 @@ int drm_buddy_block_trim(struct drm_budd
+ list_add(&block->tmp_link, &dfs);
+ err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL);
+ if (err) {
+- mark_allocated(block);
++ mark_allocated(mm, block);
+ mm->avail -= drm_buddy_block_size(mm, block);
+ if (drm_buddy_block_is_clear(block))
+ mm->clear_avail -= drm_buddy_block_size(mm, block);
+@@ -999,8 +1040,8 @@ __drm_buddy_alloc_blocks(struct drm_budd
+ return __drm_buddy_alloc_range_bias(mm, start, end,
+ order, flags);
+ else
+- /* Allocate from freelist */
+- return alloc_from_freelist(mm, order, flags);
++ /* Allocate from freetree */
++ return alloc_from_freetree(mm, order, flags);
+ }
+
+ /**
+@@ -1017,8 +1058,8 @@ __drm_buddy_alloc_blocks(struct drm_budd
+ * alloc_range_bias() called on range limitations, which traverses
+ * the tree and returns the desired block.
+ *
+- * alloc_from_freelist() called when *no* range restrictions
+- * are enforced, which picks the block from the freelist.
++ * alloc_from_freetree() called when *no* range restrictions
++ * are enforced, which picks the block from the freetree.
+ *
+ * Returns:
+ * 0 on success, error code on failure.
+@@ -1120,7 +1161,7 @@ int drm_buddy_alloc_blocks(struct drm_bu
+ }
+ } while (1);
+
+- mark_allocated(block);
++ mark_allocated(mm, block);
+ mm->avail -= drm_buddy_block_size(mm, block);
+ if (drm_buddy_block_is_clear(block))
+ mm->clear_avail -= drm_buddy_block_size(mm, block);
+@@ -1201,10 +1242,10 @@ void drm_buddy_print(struct drm_buddy *m
+ mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20, mm->clear_avail >> 20);
+
+ for (order = mm->max_order; order >= 0; order--) {
+- struct drm_buddy_block *block;
++ struct drm_buddy_block *block, *tmp;
+ u64 count = 0, free;
+
+- list_for_each_entry(block, &mm->free_list[order], link) {
++ rbtree_postorder_for_each_entry_safe(block, tmp, &mm->free_tree[order], rb) {
+ BUG_ON(!drm_buddy_block_is_free(block));
+ count++;
+ }
+--- a/include/drm/drm_buddy.h
++++ b/include/drm/drm_buddy.h
+@@ -10,6 +10,7 @@
+ #include <linux/list.h>
+ #include <linux/slab.h>
+ #include <linux/sched.h>
++#include <linux/rbtree.h>
+
+ #include <drm/drm_print.h>
+
+@@ -44,7 +45,11 @@ struct drm_buddy_block {
+ * a list, if so desired. As soon as the block is freed with
+ * drm_buddy_free* ownership is given back to the mm.
+ */
+- struct list_head link;
++ union {
++ struct rb_node rb;
++ struct list_head link;
++ };
++
+ struct list_head tmp_link;
+ };
+
+@@ -59,7 +64,7 @@ struct drm_buddy_block {
+ */
+ struct drm_buddy {
+ /* Maintain a free list for each order. */
+- struct list_head *free_list;
++ struct rb_root *free_tree;
+
+ /*
+ * Maintain explicit binary tree(s) to track the allocation of the
+@@ -85,7 +90,7 @@ struct drm_buddy {
+ };
+
+ static inline u64
+-drm_buddy_block_offset(struct drm_buddy_block *block)
++drm_buddy_block_offset(const struct drm_buddy_block *block)
+ {
+ return block->header & DRM_BUDDY_HEADER_OFFSET;
+ }
--- /dev/null
+From d4cd665c98c144dd6ad5d66d30396e13d23118c9 Mon Sep 17 00:00:00 2001
+From: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
+Date: Mon, 6 Oct 2025 15:21:23 +0530
+Subject: drm/buddy: Separate clear and dirty free block trees
+
+From: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
+
+commit d4cd665c98c144dd6ad5d66d30396e13d23118c9 upstream.
+
+Maintain two separate RB trees per order - one for clear (zeroed) blocks
+and another for dirty (uncleared) blocks. This separation improves
+code clarity and makes it more obvious which tree is being searched
+during allocation. It also improves scalability and efficiency when
+searching for a specific type of block, avoiding unnecessary checks
+and making the allocator more predictable under fragmentation.
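+
+Picking a tree then becomes a direct index rather than per-node checks
+during the search (sketch matching the helpers introduced below):
+
+  tree = drm_buddy_block_is_clear(block) ?
+          DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE;
+  root = &mm->free_trees[tree][drm_buddy_block_order(block)];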
+
+The changes have been validated using the existing drm_buddy_test
+KUnit test cases, along with selected graphics workloads,
+to ensure correctness and avoid regressions.
+
+v2: Missed adding the suggested-by tag. Added it in v2.
+
+v3(Matthew):
+ - Remove the double underscores from the internal functions.
+ - Rename the internal functions to have less generic names.
+ - Fix the error handling code.
+ - Pass tree argument for the tree macro.
+ - Use the existing dirty/free bit instead of new tree field.
+ - Make free_trees[] instead of clear_tree and dirty_tree for
+ more cleaner approach.
+
+v4:
+ - A bug was reported by Intel CI and it is fixed by
+ Matthew Auld.
+ - Replace the get_root function with
+ &mm->free_trees[tree][order] (Matthew)
+ - Remove the unnecessary rbtree_is_empty() check (Matthew)
+ - Remove the unnecessary get_tree_for_flags() function.
+ - Rename get_tree_for_block() name with get_block_tree() for more
+ clarity.
+
+v5(Jani Nikula):
+ - Don't use static inline in .c files.
+ - enum free_tree and enumerator names are quite generic for a header
+ and usage and the whole enum should be an implementation detail.
+
+v6:
+ - Rewrite the __force_merge() function using the rb_last() and rb_prev().
+
+v7(Matthew):
+ - Replace the open-coded tree iteration for loops with the
+ for_each_free_tree() macro throughout the code.
+ - Fixed out_free_roots to prevent double decrement of i,
+ addressing potential crash.
+ - Replaced enum drm_buddy_free_tree with unsigned int
+ in for_each_free_tree loops.
+
+Cc: stable@vger.kernel.org
+Fixes: a68c7eaa7a8f ("drm/amdgpu: Enable clear page functionality")
+Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
+Suggested-by: Matthew Auld <matthew.auld@intel.com>
+Reviewed-by: Matthew Auld <matthew.auld@intel.com>
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4260
+Link: https://lore.kernel.org/r/20251006095124.1663-2-Arunpravin.PaneerSelvam@amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/drm_buddy.c | 329 ++++++++++++++++++++++++--------------------
+ include/drm/drm_buddy.h | 2
+ 2 files changed, 186 insertions(+), 145 deletions(-)
+
+--- a/drivers/gpu/drm/drm_buddy.c
++++ b/drivers/gpu/drm/drm_buddy.c
+@@ -12,9 +12,16 @@
+
+ #include <drm/drm_buddy.h>
+
++enum drm_buddy_free_tree {
++ DRM_BUDDY_CLEAR_TREE = 0,
++ DRM_BUDDY_DIRTY_TREE,
++ DRM_BUDDY_MAX_FREE_TREES,
++};
++
+ static struct kmem_cache *slab_blocks;
+
+-#define rbtree_get_free_block(node) rb_entry((node), struct drm_buddy_block, rb)
++#define for_each_free_tree(tree) \
++ for ((tree) = 0; (tree) < DRM_BUDDY_MAX_FREE_TREES; (tree)++)
+
+ static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
+ struct drm_buddy_block *parent,
+@@ -45,6 +52,30 @@ static void drm_block_free(struct drm_bu
+ kmem_cache_free(slab_blocks, block);
+ }
+
++static enum drm_buddy_free_tree
++get_block_tree(struct drm_buddy_block *block)
++{
++ return drm_buddy_block_is_clear(block) ?
++ DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE;
++}
++
++static struct drm_buddy_block *
++rbtree_get_free_block(const struct rb_node *node)
++{
++ return node ? rb_entry(node, struct drm_buddy_block, rb) : NULL;
++}
++
++static struct drm_buddy_block *
++rbtree_last_free_block(struct rb_root *root)
++{
++ return rbtree_get_free_block(rb_last(root));
++}
++
++static bool rbtree_is_empty(struct rb_root *root)
++{
++ return RB_EMPTY_ROOT(root);
++}
++
+ static bool drm_buddy_block_offset_less(const struct drm_buddy_block *block,
+ const struct drm_buddy_block *node)
+ {
+@@ -59,37 +90,28 @@ static bool rbtree_block_offset_less(str
+ }
+
+ static void rbtree_insert(struct drm_buddy *mm,
+- struct drm_buddy_block *block)
++ struct drm_buddy_block *block,
++ enum drm_buddy_free_tree tree)
+ {
+ rb_add(&block->rb,
+- &mm->free_tree[drm_buddy_block_order(block)],
++ &mm->free_trees[tree][drm_buddy_block_order(block)],
+ rbtree_block_offset_less);
+ }
+
+ static void rbtree_remove(struct drm_buddy *mm,
+ struct drm_buddy_block *block)
+ {
++ unsigned int order = drm_buddy_block_order(block);
++ enum drm_buddy_free_tree tree;
+ struct rb_root *root;
+
+- root = &mm->free_tree[drm_buddy_block_order(block)];
+- rb_erase(&block->rb, root);
++ tree = get_block_tree(block);
++ root = &mm->free_trees[tree][order];
+
++ rb_erase(&block->rb, root);
+ RB_CLEAR_NODE(&block->rb);
+ }
+
+-static struct drm_buddy_block *
+-rbtree_last_entry(struct drm_buddy *mm, unsigned int order)
+-{
+- struct rb_node *node = rb_last(&mm->free_tree[order]);
+-
+- return node ? rb_entry(node, struct drm_buddy_block, rb) : NULL;
+-}
+-
+-static bool rbtree_is_empty(struct drm_buddy *mm, unsigned int order)
+-{
+- return RB_EMPTY_ROOT(&mm->free_tree[order]);
+-}
+-
+ static void clear_reset(struct drm_buddy_block *block)
+ {
+ block->header &= ~DRM_BUDDY_HEADER_CLEAR;
+@@ -112,10 +134,13 @@ static void mark_allocated(struct drm_bu
+ static void mark_free(struct drm_buddy *mm,
+ struct drm_buddy_block *block)
+ {
++ enum drm_buddy_free_tree tree;
++
+ block->header &= ~DRM_BUDDY_HEADER_STATE;
+ block->header |= DRM_BUDDY_FREE;
+
+- rbtree_insert(mm, block);
++ tree = get_block_tree(block);
++ rbtree_insert(mm, block, tree);
+ }
+
+ static void mark_split(struct drm_buddy *mm,
+@@ -201,7 +226,7 @@ static int __force_merge(struct drm_budd
+ u64 end,
+ unsigned int min_order)
+ {
+- unsigned int order;
++ unsigned int tree, order;
+ int i;
+
+ if (!min_order)
+@@ -210,45 +235,48 @@ static int __force_merge(struct drm_budd
+ if (min_order > mm->max_order)
+ return -EINVAL;
+
+- for (i = min_order - 1; i >= 0; i--) {
+- struct rb_root *root = &mm->free_tree[i];
+- struct rb_node *iter;
++ for_each_free_tree(tree) {
++ for (i = min_order - 1; i >= 0; i--) {
++ struct rb_node *iter = rb_last(&mm->free_trees[tree][i]);
+
+- iter = rb_last(root);
+-
+- while (iter) {
+- struct drm_buddy_block *block, *buddy;
+- u64 block_start, block_end;
++ while (iter) {
++ struct drm_buddy_block *block, *buddy;
++ u64 block_start, block_end;
+
+- block = rbtree_get_free_block(iter);
+- iter = rb_prev(iter);
++ block = rbtree_get_free_block(iter);
++ iter = rb_prev(iter);
+
+- if (!block || !block->parent)
+- continue;
++ if (!block || !block->parent)
++ continue;
+
+- block_start = drm_buddy_block_offset(block);
+- block_end = block_start + drm_buddy_block_size(mm, block) - 1;
++ block_start = drm_buddy_block_offset(block);
++ block_end = block_start + drm_buddy_block_size(mm, block) - 1;
+
+- if (!contains(start, end, block_start, block_end))
+- continue;
++ if (!contains(start, end, block_start, block_end))
++ continue;
+
+- buddy = __get_buddy(block);
+- if (!drm_buddy_block_is_free(buddy))
+- continue;
++ buddy = __get_buddy(block);
++ if (!drm_buddy_block_is_free(buddy))
++ continue;
+
+- WARN_ON(drm_buddy_block_is_clear(block) ==
+- drm_buddy_block_is_clear(buddy));
++ WARN_ON(drm_buddy_block_is_clear(block) ==
++ drm_buddy_block_is_clear(buddy));
+
+- if (iter == &buddy->rb)
+- iter = rb_prev(iter);
++ /*
++ * Advance to the next node when the current node is the buddy,
++ * as freeing the block will also remove its buddy from the tree.
++ */
++ if (iter == &buddy->rb)
++ iter = rb_prev(iter);
+
+- rbtree_remove(mm, block);
+- if (drm_buddy_block_is_clear(block))
+- mm->clear_avail -= drm_buddy_block_size(mm, block);
++ rbtree_remove(mm, block);
++ if (drm_buddy_block_is_clear(block))
++ mm->clear_avail -= drm_buddy_block_size(mm, block);
+
+- order = __drm_buddy_free(mm, block, true);
+- if (order >= min_order)
+- return 0;
++ order = __drm_buddy_free(mm, block, true);
++ if (order >= min_order)
++ return 0;
++ }
+ }
+ }
+
+@@ -269,7 +297,7 @@ static int __force_merge(struct drm_budd
+ */
+ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
+ {
+- unsigned int i;
++ unsigned int i, j, root_count = 0;
+ u64 offset = 0;
+
+ if (size < chunk_size)
+@@ -291,14 +319,22 @@ int drm_buddy_init(struct drm_buddy *mm,
+
+ BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER);
+
+- mm->free_tree = kmalloc_array(mm->max_order + 1,
+- sizeof(struct rb_root),
+- GFP_KERNEL);
+- if (!mm->free_tree)
++ mm->free_trees = kmalloc_array(DRM_BUDDY_MAX_FREE_TREES,
++ sizeof(*mm->free_trees),
++ GFP_KERNEL);
++ if (!mm->free_trees)
+ return -ENOMEM;
+
+- for (i = 0; i <= mm->max_order; ++i)
+- mm->free_tree[i] = RB_ROOT;
++ for_each_free_tree(i) {
++ mm->free_trees[i] = kmalloc_array(mm->max_order + 1,
++ sizeof(struct rb_root),
++ GFP_KERNEL);
++ if (!mm->free_trees[i])
++ goto out_free_tree;
++
++ for (j = 0; j <= mm->max_order; ++j)
++ mm->free_trees[i][j] = RB_ROOT;
++ }
+
+ mm->n_roots = hweight64(size);
+
+@@ -308,8 +344,6 @@ int drm_buddy_init(struct drm_buddy *mm,
+ if (!mm->roots)
+ goto out_free_tree;
+
+- i = 0;
+-
+ /*
+ * Split into power-of-two blocks, in case we are given a size that is
+ * not itself a power-of-two.
+@@ -328,24 +362,26 @@ int drm_buddy_init(struct drm_buddy *mm,
+
+ mark_free(mm, root);
+
+- BUG_ON(i > mm->max_order);
++ BUG_ON(root_count > mm->max_order);
+ BUG_ON(drm_buddy_block_size(mm, root) < chunk_size);
+
+- mm->roots[i] = root;
++ mm->roots[root_count] = root;
+
+ offset += root_size;
+ size -= root_size;
+- i++;
++ root_count++;
+ } while (size);
+
+ return 0;
+
+ out_free_roots:
+- while (i--)
+- drm_block_free(mm, mm->roots[i]);
++ while (root_count--)
++ drm_block_free(mm, mm->roots[root_count]);
+ kfree(mm->roots);
+ out_free_tree:
+- kfree(mm->free_tree);
++ while (i--)
++ kfree(mm->free_trees[i]);
++ kfree(mm->free_trees);
+ return -ENOMEM;
+ }
+ EXPORT_SYMBOL(drm_buddy_init);
+@@ -381,8 +417,9 @@ void drm_buddy_fini(struct drm_buddy *mm
+
+ WARN_ON(mm->avail != mm->size);
+
++ for_each_free_tree(i)
++ kfree(mm->free_trees[i]);
+ kfree(mm->roots);
+- kfree(mm->free_tree);
+ }
+ EXPORT_SYMBOL(drm_buddy_fini);
+
+@@ -406,8 +443,7 @@ static int split_block(struct drm_buddy
+ return -ENOMEM;
+ }
+
+- mark_free(mm, block->left);
+- mark_free(mm, block->right);
++ mark_split(mm, block);
+
+ if (drm_buddy_block_is_clear(block)) {
+ mark_cleared(block->left);
+@@ -415,7 +451,8 @@ static int split_block(struct drm_buddy
+ clear_reset(block);
+ }
+
+- mark_split(mm, block);
++ mark_free(mm, block->left);
++ mark_free(mm, block->right);
+
+ return 0;
+ }
+@@ -448,6 +485,7 @@ EXPORT_SYMBOL(drm_get_buddy);
+ */
+ void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear)
+ {
++ enum drm_buddy_free_tree src_tree, dst_tree;
+ u64 root_size, size, start;
+ unsigned int order;
+ int i;
+@@ -462,19 +500,24 @@ void drm_buddy_reset_clear(struct drm_bu
+ size -= root_size;
+ }
+
++ src_tree = is_clear ? DRM_BUDDY_DIRTY_TREE : DRM_BUDDY_CLEAR_TREE;
++ dst_tree = is_clear ? DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE;
++
+ for (i = 0; i <= mm->max_order; ++i) {
++ struct rb_root *root = &mm->free_trees[src_tree][i];
+ struct drm_buddy_block *block, *tmp;
+
+- rbtree_postorder_for_each_entry_safe(block, tmp, &mm->free_tree[i], rb) {
+- if (is_clear != drm_buddy_block_is_clear(block)) {
+- if (is_clear) {
+- mark_cleared(block);
+- mm->clear_avail += drm_buddy_block_size(mm, block);
+- } else {
+- clear_reset(block);
+- mm->clear_avail -= drm_buddy_block_size(mm, block);
+- }
++ rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) {
++ rbtree_remove(mm, block);
++ if (is_clear) {
++ mark_cleared(block);
++ mm->clear_avail += drm_buddy_block_size(mm, block);
++ } else {
++ clear_reset(block);
++ mm->clear_avail -= drm_buddy_block_size(mm, block);
+ }
++
++ rbtree_insert(mm, block, dst_tree);
+ }
+ }
+ }
+@@ -664,27 +707,17 @@ __drm_buddy_alloc_range_bias(struct drm_
+ }
+
+ static struct drm_buddy_block *
+-get_maxblock(struct drm_buddy *mm, unsigned int order,
+- unsigned long flags)
++get_maxblock(struct drm_buddy *mm,
++ unsigned int order,
++ enum drm_buddy_free_tree tree)
+ {
+ struct drm_buddy_block *max_block = NULL, *block = NULL;
++ struct rb_root *root;
+ unsigned int i;
+
+ for (i = order; i <= mm->max_order; ++i) {
+- struct rb_node *iter = rb_last(&mm->free_tree[i]);
+- struct drm_buddy_block *tmp_block;
+-
+- while (iter) {
+- tmp_block = rbtree_get_free_block(iter);
+-
+- if (!block_incompatible(tmp_block, flags)) {
+- block = tmp_block;
+- break;
+- }
+-
+- iter = rb_prev(iter);
+- }
+-
++ root = &mm->free_trees[tree][i];
++ block = rbtree_last_free_block(root);
+ if (!block)
+ continue;
+
+@@ -708,39 +741,37 @@ alloc_from_freetree(struct drm_buddy *mm
+ unsigned long flags)
+ {
+ struct drm_buddy_block *block = NULL;
++ struct rb_root *root;
++ enum drm_buddy_free_tree tree;
+ unsigned int tmp;
+ int err;
+
++ tree = (flags & DRM_BUDDY_CLEAR_ALLOCATION) ?
++ DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE;
++
+ if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
+- block = get_maxblock(mm, order, flags);
++ block = get_maxblock(mm, order, tree);
+ if (block)
+ /* Store the obtained block order */
+ tmp = drm_buddy_block_order(block);
+ } else {
+ for (tmp = order; tmp <= mm->max_order; ++tmp) {
+- struct rb_node *iter = rb_last(&mm->free_tree[tmp]);
+- struct drm_buddy_block *tmp_block;
+-
+- while (iter) {
+- tmp_block = rbtree_get_free_block(iter);
+-
+- if (!block_incompatible(tmp_block, flags)) {
+- block = tmp_block;
+- break;
+- }
+-
+- iter = rb_prev(iter);
+- }
+-
++ /* Get RB tree root for this order and tree */
++ root = &mm->free_trees[tree][tmp];
++ block = rbtree_last_free_block(root);
+ if (block)
+ break;
+ }
+ }
+
+ if (!block) {
+- /* Fallback method */
++ /* Try allocating from the other tree */
++ tree = (tree == DRM_BUDDY_CLEAR_TREE) ?
++ DRM_BUDDY_DIRTY_TREE : DRM_BUDDY_CLEAR_TREE;
++
+ for (tmp = order; tmp <= mm->max_order; ++tmp) {
+- block = rbtree_last_entry(mm, tmp);
++ root = &mm->free_trees[tree][tmp];
++ block = rbtree_last_free_block(root);
+ if (block)
+ break;
+ }
+@@ -885,10 +916,9 @@ static int __alloc_contig_try_harder(str
+ {
+ u64 rhs_offset, lhs_offset, lhs_size, filled;
+ struct drm_buddy_block *block;
++ unsigned int tree, order;
+ LIST_HEAD(blocks_lhs);
+- struct rb_node *iter;
+ unsigned long pages;
+- unsigned int order;
+ u64 modify_size;
+ int err;
+
+@@ -898,40 +928,45 @@ static int __alloc_contig_try_harder(str
+ if (order == 0)
+ return -ENOSPC;
+
+- if (rbtree_is_empty(mm, order))
+- return -ENOSPC;
++ for_each_free_tree(tree) {
++ struct rb_root *root;
++ struct rb_node *iter;
+
+- iter = rb_last(&mm->free_tree[order]);
++ root = &mm->free_trees[tree][order];
++ if (rbtree_is_empty(root))
++ continue;
+
+- while (iter) {
+- block = rbtree_get_free_block(iter);
++ iter = rb_last(root);
++ while (iter) {
++ block = rbtree_get_free_block(iter);
+
+- /* Allocate blocks traversing RHS */
+- rhs_offset = drm_buddy_block_offset(block);
+- err = __drm_buddy_alloc_range(mm, rhs_offset, size,
+- &filled, blocks);
+- if (!err || err != -ENOSPC)
+- return err;
+-
+- lhs_size = max((size - filled), min_block_size);
+- if (!IS_ALIGNED(lhs_size, min_block_size))
+- lhs_size = round_up(lhs_size, min_block_size);
+-
+- /* Allocate blocks traversing LHS */
+- lhs_offset = drm_buddy_block_offset(block) - lhs_size;
+- err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size,
+- NULL, &blocks_lhs);
+- if (!err) {
+- list_splice(&blocks_lhs, blocks);
+- return 0;
+- } else if (err != -ENOSPC) {
++ /* Allocate blocks traversing RHS */
++ rhs_offset = drm_buddy_block_offset(block);
++ err = __drm_buddy_alloc_range(mm, rhs_offset, size,
++ &filled, blocks);
++ if (!err || err != -ENOSPC)
++ return err;
++
++ lhs_size = max((size - filled), min_block_size);
++ if (!IS_ALIGNED(lhs_size, min_block_size))
++ lhs_size = round_up(lhs_size, min_block_size);
++
++ /* Allocate blocks traversing LHS */
++ lhs_offset = drm_buddy_block_offset(block) - lhs_size;
++ err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size,
++ NULL, &blocks_lhs);
++ if (!err) {
++ list_splice(&blocks_lhs, blocks);
++ return 0;
++ } else if (err != -ENOSPC) {
++ drm_buddy_free_list_internal(mm, blocks);
++ return err;
++ }
++ /* Free blocks for the next iteration */
+ drm_buddy_free_list_internal(mm, blocks);
+- return err;
+- }
+- /* Free blocks for the next iteration */
+- drm_buddy_free_list_internal(mm, blocks);
+
+- iter = rb_prev(iter);
++ iter = rb_prev(iter);
++ }
+ }
+
+ return -ENOSPC;
+@@ -1243,11 +1278,17 @@ void drm_buddy_print(struct drm_buddy *m
+
+ for (order = mm->max_order; order >= 0; order--) {
+ struct drm_buddy_block *block, *tmp;
++ struct rb_root *root;
+ u64 count = 0, free;
++ unsigned int tree;
++
++ for_each_free_tree(tree) {
++ root = &mm->free_trees[tree][order];
+
+- rbtree_postorder_for_each_entry_safe(block, tmp, &mm->free_tree[order], rb) {
+- BUG_ON(!drm_buddy_block_is_free(block));
+- count++;
++ rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) {
++ BUG_ON(!drm_buddy_block_is_free(block));
++ count++;
++ }
+ }
+
+ drm_printf(p, "order-%2d ", order);
+--- a/include/drm/drm_buddy.h
++++ b/include/drm/drm_buddy.h
+@@ -64,7 +64,7 @@ struct drm_buddy_block {
+ */
+ struct drm_buddy {
+ /* Maintain a free list for each order. */
+- struct rb_root *free_tree;
++ struct rb_root **free_trees;
+
+ /*
+ * Maintain explicit binary tree(s) to track the allocation of the
--- /dev/null
+From 630efee9493cf64ff7b9a1652978807fef385fdd Mon Sep 17 00:00:00 2001
+From: Karol Wachowski <karol.wachowski@linux.intel.com>
+Date: Fri, 12 Dec 2025 14:41:33 +0100
+Subject: drm: Fix object leak in DRM_IOCTL_GEM_CHANGE_HANDLE
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Karol Wachowski <karol.wachowski@linux.intel.com>
+
+commit 630efee9493cf64ff7b9a1652978807fef385fdd upstream.
+
+Add missing drm_gem_object_put() call when drm_gem_object_lookup()
+successfully returns an object. This fixes a GEM object reference
+leak that can prevent driver modules from unloading when using
+prime buffers.
+
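+A minimal sketch of the refcounting contract at play (simplified;
+error handling elided):
+
+	struct drm_gem_object *obj;
+
+	obj = drm_gem_object_lookup(file_priv, handle);	/* takes a ref */
+	if (!obj)
+		return -ENOENT;
+	/* ... use obj ... */
+	drm_gem_object_put(obj);	/* must run on every return path */
+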
+Fixes: 53096728b891 ("drm: Add DRM prime interface to reassign GEM handle")
+Cc: <stable@vger.kernel.org> # v6.18+
+Signed-off-by: Karol Wachowski <karol.wachowski@linux.intel.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Maciej Falkowski <maciej.falkowski@linux.intel.com>
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Link: https://lore.kernel.org/r/20251212134133.475218-1-karol.wachowski@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/drm_gem.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
+index f884d155a832..3b9df655e837 100644
+--- a/drivers/gpu/drm/drm_gem.c
++++ b/drivers/gpu/drm/drm_gem.c
+@@ -979,8 +979,10 @@ int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data,
+ if (!obj)
+ return -ENOENT;
+
+- if (args->handle == args->new_handle)
+- return 0;
++ if (args->handle == args->new_handle) {
++ ret = 0;
++ goto out;
++ }
+
+ mutex_lock(&file_priv->prime.lock);
+
+@@ -1012,6 +1014,8 @@ int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data,
+
+ out_unlock:
+ mutex_unlock(&file_priv->prime.lock);
++out:
++ drm_gem_object_put(obj);
+
+ return ret;
+ }
+--
+2.52.0
+
--- /dev/null
+From be729f9de6c64240645dc80a24162ac4d3fe00a8 Mon Sep 17 00:00:00 2001
+From: Thomas Zimmermann <tzimmermann@suse.de>
+Date: Mon, 29 Sep 2025 10:23:23 +0200
+Subject: drm/gma500: Remove unused helper psb_fbdev_fb_setcolreg()
+
+From: Thomas Zimmermann <tzimmermann@suse.de>
+
+commit be729f9de6c64240645dc80a24162ac4d3fe00a8 upstream.
+
+Remove psb_fbdev_fb_setcolreg(), which hasn't been called in almost
+a decade.
+
+Gma500 commit 4d8d096e9ae8 ("gma500: introduce the framebuffer support
+code") added the helper psb_fbdev_fb_setcolreg() for setting the fbdev
+palette via fbdev's fb_setcolreg callback. Later
+commit 3da6c2f3b730 ("drm/gma500: use DRM_FB_HELPER_DEFAULT_OPS for
+fb_ops") set several default helpers for fbdev emulation, including
+fb_setcmap.
+
+The fbdev subsystem always prefers fb_setcmap over fb_setcolreg. [1]
+Hence, the gma500 code is no longer in use and gma500 has been using
+drm_fb_helper_setcmap() for several years without issues.
+
+Fixes: 3da6c2f3b730 ("drm/gma500: use DRM_FB_HELPER_DEFAULT_OPS for fb_ops")
+Cc: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+Cc: Stefan Christ <contact@stefanchrist.eu>
+Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
+Cc: dri-devel@lists.freedesktop.org
+Cc: <stable@vger.kernel.org> # v4.10+
+Link: https://elixir.bootlin.com/linux/v6.16.9/source/drivers/video/fbdev/core/fbcmap.c#L246 # [1]
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Acked-by: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+Link: https://lore.kernel.org/r/20250929082338.18845-1-tzimmermann@suse.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/gma500/fbdev.c | 43 -----------------------------------------
+ 1 file changed, 43 deletions(-)
+
+--- a/drivers/gpu/drm/gma500/fbdev.c
++++ b/drivers/gpu/drm/gma500/fbdev.c
+@@ -50,48 +50,6 @@ static const struct vm_operations_struct
+ * struct fb_ops
+ */
+
+-#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
+-
+-static int psb_fbdev_fb_setcolreg(unsigned int regno,
+- unsigned int red, unsigned int green,
+- unsigned int blue, unsigned int transp,
+- struct fb_info *info)
+-{
+- struct drm_fb_helper *fb_helper = info->par;
+- struct drm_framebuffer *fb = fb_helper->fb;
+- uint32_t v;
+-
+- if (!fb)
+- return -ENOMEM;
+-
+- if (regno > 255)
+- return 1;
+-
+- red = CMAP_TOHW(red, info->var.red.length);
+- blue = CMAP_TOHW(blue, info->var.blue.length);
+- green = CMAP_TOHW(green, info->var.green.length);
+- transp = CMAP_TOHW(transp, info->var.transp.length);
+-
+- v = (red << info->var.red.offset) |
+- (green << info->var.green.offset) |
+- (blue << info->var.blue.offset) |
+- (transp << info->var.transp.offset);
+-
+- if (regno < 16) {
+- switch (fb->format->cpp[0] * 8) {
+- case 16:
+- ((uint32_t *) info->pseudo_palette)[regno] = v;
+- break;
+- case 24:
+- case 32:
+- ((uint32_t *) info->pseudo_palette)[regno] = v;
+- break;
+- }
+- }
+-
+- return 0;
+-}
+-
+ static int psb_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+ {
+ if (vma->vm_pgoff != 0)
+@@ -135,7 +93,6 @@ static const struct fb_ops psb_fbdev_fb_
+ .owner = THIS_MODULE,
+ __FB_DEFAULT_IOMEM_OPS_RDWR,
+ DRM_FB_HELPER_DEFAULT_OPS,
+- .fb_setcolreg = psb_fbdev_fb_setcolreg,
+ __FB_DEFAULT_IOMEM_OPS_DRAW,
+ .fb_mmap = psb_fbdev_fb_mmap,
+ .fb_destroy = psb_fbdev_fb_destroy,
--- /dev/null
+From 1c7f9e528f8f488b060b786bfb90b40540854db3 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri, 5 Dec 2025 12:35:01 +0100
+Subject: drm/i915: Fix format string truncation warning
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+commit 1c7f9e528f8f488b060b786bfb90b40540854db3 upstream.
+
+GCC notices that the 16-byte uabi_name field could theoretically be too
+small for the formatted string once the instance number reaches three
+digits.
+
+So grow the field to 20 bytes.
+
+drivers/gpu/drm/i915/intel_memory_region.c: In function ‘intel_memory_region_create’:
+drivers/gpu/drm/i915/intel_memory_region.c:273:61: error: ‘%u’ directive output may be truncated writing between 1 and 5 bytes into a region of size between 3 and 11 [-Werror=format-truncation=]
+ 273 | snprintf(mem->uabi_name, sizeof(mem->uabi_name), "%s%u",
+ | ^~
+drivers/gpu/drm/i915/intel_memory_region.c:273:58: note: directive argument in the range [0, 65535]
+ 273 | snprintf(mem->uabi_name, sizeof(mem->uabi_name), "%s%u",
+ | ^~~~~~
+drivers/gpu/drm/i915/intel_memory_region.c:273:9: note: ‘snprintf’ output between 7 and 19 bytes into a destination of size 16
+ 273 | snprintf(mem->uabi_name, sizeof(mem->uabi_name), "%s%u",
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ 274 | intel_memory_type_str(type), instance);
+ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
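+The arithmetic is easy to reproduce outside the driver (a hedged
+sketch; "stolen-system" is one plausible worst-case prefix, the real
+strings come from intel_memory_type_str()):
+
+	char uabi_name[16];
+	unsigned short instance = 65535;
+
+	/* "stolen-system" (13) + "65535" (5) + NUL = 19 bytes > 16 */
+	snprintf(uabi_name, sizeof(uabi_name), "%s%u",
+		 "stolen-system", instance);
+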
+Fixes: 3b38d3515753 ("drm/i915: Add stable memory region names")
+Cc: <stable@vger.kernel.org> # v6.8+
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Tvrtko Ursulin <tursulin@ursulin.net>
+Link: https://lore.kernel.org/r/20251205113500.684286-2-ardb@kernel.org
+(cherry picked from commit 18476087f1a18dc279d200d934ad94fba1fb51d5)
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/intel_memory_region.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/intel_memory_region.h
++++ b/drivers/gpu/drm/i915/intel_memory_region.h
+@@ -72,7 +72,7 @@ struct intel_memory_region {
+ u16 instance;
+ enum intel_region_id id;
+ char name[16];
+- char uabi_name[16];
++ char uabi_name[20];
+ bool private; /* not for userspace */
+
+ struct {
--- /dev/null
+From 4fe2bd195435e71c117983d87f278112c5ab364c Mon Sep 17 00:00:00 2001
+From: Krzysztof Niemiec <krzysztof.niemiec@intel.com>
+Date: Tue, 16 Dec 2025 19:09:01 +0100
+Subject: drm/i915/gem: Zero-initialize the eb.vma array in i915_gem_do_execbuffer
+
+From: Krzysztof Niemiec <krzysztof.niemiec@intel.com>
+
+commit 4fe2bd195435e71c117983d87f278112c5ab364c upstream.
+
+Initialize the eb.vma array with values of 0 when the eb structure is
+first set up. In particular, this sets the eb->vma[i].vma pointers to
+NULL, simplifying cleanup and getting rid of the bug described below.
+
+During the execution of eb_lookup_vmas(), the eb->vma array is
+successively filled up with struct eb_vma objects. This process includes
+calling eb_add_vma(), which might fail; however, even in the event of
+failure, eb->vma[i].vma is set for the currently processed buffer.
+
+If eb_add_vma() fails, eb_lookup_vmas() returns with an error, which
+prompts a call to eb_release_vmas() to clean up the mess. Since
+eb_lookup_vmas() might fail while processing any (possibly not the first)
+buffer, eb_release_vmas() checks whether a buffer's vma is NULL to know
+at what point the lookup function failed.
+
+In eb_lookup_vmas(), eb->vma[i].vma is set to NULL if either the helper
+function eb_lookup_vma() or eb_validate_vma() fails. eb->vma[i+1].vma is
+set to NULL in case i915_gem_object_userptr_submit_init() fails; the
+current one needs to be cleaned up by eb_release_vmas() at this point,
+so the next one is set. If eb_add_vma() fails, neither the current nor
+the next vma is set to NULL, which is a source of a NULL deref bug
+described in the issue linked in the Closes tag.
+
+When entering eb_lookup_vmas(), the vma pointers are set to the slab
+poison value instead of NULL. This doesn't matter for the actual
+lookup, since they get overwritten anyway; however, eb_release_vmas()
+only recognizes NULL as the stopping value, so until now the pointers
+had to be set to NULL one by one on intermediate failure. This patch
+changes the approach to filling them all with NULL at the start
+instead, rather than handling that manually during failure.
+
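+A condensed sketch of the sentinel convention the cleanup relies on
+(hypothetical loop shape; the real walk lives in eb_release_vmas()):
+
+	for (i = 0; i < eb->buffer_count; i++) {
+		struct eb_vma *ev = &eb->vma[i];
+
+		if (!ev->vma)	/* zero-init guarantees a clean stop */
+			break;
+		/* ... unreserve and release ev ... */
+	}
+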
+Reported-by: Gangmin Kim <km.kim1503@gmail.com>
+Closes: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/15062
+Fixes: 544460c33821 ("drm/i915: Multi-BB execbuf")
+Cc: stable@vger.kernel.org # 5.16.x
+Signed-off-by: Krzysztof Niemiec <krzysztof.niemiec@intel.com>
+Reviewed-by: Janusz Krzysztofik <janusz.krzysztofik@linux.intel.com>
+Reviewed-by: Krzysztof Karas <krzysztof.karas@intel.com>
+Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
+Signed-off-by: Andi Shyti <andi.shyti@kernel.org>
+Link: https://lore.kernel.org/r/20251216180900.54294-2-krzysztof.niemiec@intel.com
+(cherry picked from commit 08889b706d4f0b8d2352b7ca29c2d8df4d0787cd)
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 37 +++++++++++--------------
+ 1 file changed, 17 insertions(+), 20 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+@@ -950,13 +950,13 @@ static int eb_lookup_vmas(struct i915_ex
+ vma = eb_lookup_vma(eb, eb->exec[i].handle);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+- goto err;
++ return err;
+ }
+
+ err = eb_validate_vma(eb, &eb->exec[i], vma);
+ if (unlikely(err)) {
+ i915_vma_put(vma);
+- goto err;
++ return err;
+ }
+
+ err = eb_add_vma(eb, ¤t_batch, i, vma);
+@@ -965,19 +965,8 @@ static int eb_lookup_vmas(struct i915_ex
+
+ if (i915_gem_object_is_userptr(vma->obj)) {
+ err = i915_gem_object_userptr_submit_init(vma->obj);
+- if (err) {
+- if (i + 1 < eb->buffer_count) {
+- /*
+- * Execbuffer code expects last vma entry to be NULL,
+- * since we already initialized this entry,
+- * set the next value to NULL or we mess up
+- * cleanup handling.
+- */
+- eb->vma[i + 1].vma = NULL;
+- }
+-
++ if (err)
+ return err;
+- }
+
+ eb->vma[i].flags |= __EXEC_OBJECT_USERPTR_INIT;
+ eb->args->flags |= __EXEC_USERPTR_USED;
+@@ -985,10 +974,6 @@ static int eb_lookup_vmas(struct i915_ex
+ }
+
+ return 0;
+-
+-err:
+- eb->vma[i].vma = NULL;
+- return err;
+ }
+
+ static int eb_lock_vmas(struct i915_execbuffer *eb)
+@@ -3374,7 +3359,8 @@ i915_gem_do_execbuffer(struct drm_device
+
+ eb.exec = exec;
+ eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1);
+- eb.vma[0].vma = NULL;
++ memset(eb.vma, 0, (args->buffer_count + 1) * sizeof(struct eb_vma));
++
+ eb.batch_pool = NULL;
+
+ eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
+@@ -3583,7 +3569,18 @@ i915_gem_execbuffer2_ioctl(struct drm_de
+ if (err)
+ return err;
+
+- /* Allocate extra slots for use by the command parser */
++ /*
++ * Allocate extra slots for use by the command parser.
++ *
++ * Note that this allocation handles two different arrays (the
++ * exec2_list array, and the eventual eb.vma array introduced in
++ * i915_gem_do_execbuffer()), that reside in virtually contiguous
++ * memory. Also note that the allocation intentionally doesn't fill the
++ * area with zeros, because the exec2_list part doesn't need to be, as
++ * it's immediately overwritten by user data a few lines below.
++ * However, the eb.vma part is explicitly zeroed later in
++ * i915_gem_do_execbuffer().
++ */
+ exec2_list = kvmalloc_array(count + 2, eb_element_size(),
+ __GFP_NOWARN | GFP_KERNEL);
+ if (exec2_list == NULL) {
--- /dev/null
+From 6b991ad8dc3abfe5720fc2e9ee96be63ae43e362 Mon Sep 17 00:00:00 2001
+From: Alessio Belle <alessio.belle@imgtec.com>
+Date: Mon, 8 Dec 2025 09:11:00 +0000
+Subject: drm/imagination: Disallow exporting of PM/FW protected objects
+
+From: Alessio Belle <alessio.belle@imgtec.com>
+
+commit 6b991ad8dc3abfe5720fc2e9ee96be63ae43e362 upstream.
+
+These objects are meant to be used by the GPU firmware or by the PM unit
+within the GPU, in which case they may contain physical addresses.
+
+This adds a layer of protection against exposing potentially exploitable
+information outside of the driver.
+
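+From userspace the new behaviour is visible as the PRIME export failing
+(hedged libdrm sketch; error handling elided):
+
+	int prime_fd;
+
+	/* handle was created with DRM_PVR_BO_PM_FW_PROTECT set */
+	ret = drmPrimeHandleToFD(fd, handle, DRM_CLOEXEC, &prime_fd);
+	/* now fails with -EPERM instead of handing out the buffer */
+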
+Fixes: ff5f643de0bf ("drm/imagination: Add GEM and VM related code")
+Signed-off-by: Alessio Belle <alessio.belle@imgtec.com>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20251208-no-export-pm-fw-obj-v1-1-83ab12c61693@imgtec.com
+Signed-off-by: Matt Coster <matt.coster@imgtec.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/imagination/pvr_gem.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/drivers/gpu/drm/imagination/pvr_gem.c
++++ b/drivers/gpu/drm/imagination/pvr_gem.c
+@@ -28,6 +28,16 @@ static void pvr_gem_object_free(struct d
+ drm_gem_shmem_object_free(obj);
+ }
+
++static struct dma_buf *pvr_gem_export(struct drm_gem_object *obj, int flags)
++{
++ struct pvr_gem_object *pvr_obj = gem_to_pvr_gem(obj);
++
++ if (pvr_obj->flags & DRM_PVR_BO_PM_FW_PROTECT)
++ return ERR_PTR(-EPERM);
++
++ return drm_gem_prime_export(obj, flags);
++}
++
+ static int pvr_gem_mmap(struct drm_gem_object *gem_obj, struct vm_area_struct *vma)
+ {
+ struct pvr_gem_object *pvr_obj = gem_to_pvr_gem(gem_obj);
+@@ -42,6 +52,7 @@ static int pvr_gem_mmap(struct drm_gem_o
+ static const struct drm_gem_object_funcs pvr_gem_object_funcs = {
+ .free = pvr_gem_object_free,
+ .print_info = drm_gem_shmem_object_print_info,
++ .export = pvr_gem_export,
+ .pin = drm_gem_shmem_object_pin,
+ .unpin = drm_gem_shmem_object_unpin,
+ .get_sg_table = drm_gem_shmem_object_get_sg_table,
--- /dev/null
+From a846505a193d7492ad3531e33cacfca31e4bcdd1 Mon Sep 17 00:00:00 2001
+From: Miaoqian Lin <linmq006@gmail.com>
+Date: Wed, 29 Oct 2025 15:23:06 +0800
+Subject: drm/mediatek: Fix device node reference leak in mtk_dp_dt_parse()
+
+From: Miaoqian Lin <linmq006@gmail.com>
+
+commit a846505a193d7492ad3531e33cacfca31e4bcdd1 upstream.
+
+The function mtk_dp_dt_parse() calls of_graph_get_endpoint_by_regs()
+to get the endpoint device node, but fails to call of_node_put() to release
+the reference when the function returns. This results in a device node
+reference leak.
+
+Fix this by adding the missing of_node_put() call before returning from
+the function.
+
+Found via static analysis and code review.
+
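+The resulting pattern, mirroring the hunk below (sketch):
+
+	endpoint = of_graph_get_endpoint_by_regs(pdev->dev.of_node, 1, -1);
+	len = of_property_count_elems_of_size(endpoint, "data-lanes",
+					      sizeof(u32));
+	of_node_put(endpoint);	/* drop the ref before any return */
+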
+Fixes: f70ac097a2cf ("drm/mediatek: Add MT8195 Embedded DisplayPort driver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Miaoqian Lin <linmq006@gmail.com>
+Reviewed-by: Markus Schneider-Pargmann <msp@baylibre.com>
+Reviewed-by: CK Hu <ck.hu@mediatek.com>
+Link: https://patchwork.kernel.org/project/dri-devel/patch/20251029072307.10955-1-linmq006@gmail.com/
+Signed-off-by: Chun-Kuang Hu <chunkuang.hu@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/mediatek/mtk_dp.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/gpu/drm/mediatek/mtk_dp.c
++++ b/drivers/gpu/drm/mediatek/mtk_dp.c
+@@ -2087,6 +2087,7 @@ static int mtk_dp_dt_parse(struct mtk_dp
+ endpoint = of_graph_get_endpoint_by_regs(pdev->dev.of_node, 1, -1);
+ len = of_property_count_elems_of_size(endpoint,
+ "data-lanes", sizeof(u32));
++ of_node_put(endpoint);
+ if (len < 0 || len > 4 || len == 3) {
+ dev_err(dev, "invalid data lane size: %d\n", len);
+ return -EINVAL;
--- /dev/null
+From 2a2a04be8e869a19c9f950b89b1e05832a0f7ec7 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Tue, 23 Sep 2025 17:23:38 +0200
+Subject: drm/mediatek: Fix probe device leaks
+
+From: Johan Hovold <johan@kernel.org>
+
+commit 2a2a04be8e869a19c9f950b89b1e05832a0f7ec7 upstream.
+
+Make sure to drop the reference taken to each component device during
+probe on probe failure (e.g. probe deferral) and on driver unbind.
+
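+The fix uses the usual devres idiom so the reference taken by
+of_find_device_by_node() is dropped automatically on probe failure and
+on unbind (condensed from the hunk below):
+
+	comp_pdev = of_find_device_by_node(node);	/* takes a ref */
+	if (!comp_pdev)
+		return -EPROBE_DEFER;
+
+	ret = devm_add_action_or_reset(dev, mtk_ddp_comp_put_device,
+				       &comp_pdev->dev);
+	if (ret)
+		return ret;	/* the action already ran put_device() */
+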
+Fixes: 6ea6f8276725 ("drm/mediatek: Use correct device pointer to get CMDQ client register")
+Cc: stable@vger.kernel.org # 5.12
+Cc: Chun-Kuang Hu <chunkuang.hu@kernel.org>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Link: https://patchwork.kernel.org/project/dri-devel/patch/20250923152340.18234-4-johan@kernel.org/
+Signed-off-by: Chun-Kuang Hu <chunkuang.hu@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/mediatek/mtk_ddp_comp.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
++++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
+@@ -621,6 +621,13 @@ int mtk_find_possible_crtcs(struct drm_d
+ return ret;
+ }
+
++static void mtk_ddp_comp_put_device(void *_dev)
++{
++ struct device *dev = _dev;
++
++ put_device(dev);
++}
++
+ static void mtk_ddp_comp_clk_put(void *_clk)
+ {
+ struct clk *clk = _clk;
+@@ -656,6 +663,10 @@ int mtk_ddp_comp_init(struct device *dev
+ }
+ comp->dev = &comp_pdev->dev;
+
++ ret = devm_add_action_or_reset(dev, mtk_ddp_comp_put_device, comp->dev);
++ if (ret)
++ return ret;
++
+ if (type == MTK_DISP_AAL ||
+ type == MTK_DISP_BLS ||
+ type == MTK_DISP_CCORR ||
--- /dev/null
+From 5e49200593f331cd0629b5376fab9192f698e8ef Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Tue, 23 Sep 2025 17:23:37 +0200
+Subject: drm/mediatek: Fix probe memory leak
+
+From: Johan Hovold <johan@kernel.org>
+
+commit 5e49200593f331cd0629b5376fab9192f698e8ef upstream.
+
+The Mediatek DRM driver allocates private data for components without a
+platform driver but as the lifetime is tied to each component device,
+the memory is never freed.
+
+Tie the allocation lifetime to the DRM platform device so that the
+memory is released on probe failure (e.g. probe deferral) and when the
+driver is unbound.
+
+Fixes: c0d36de868a6 ("drm/mediatek: Move clk info from struct mtk_ddp_comp to sub driver private data")
+Cc: stable@vger.kernel.org # 5.12
+Cc: CK Hu <ck.hu@mediatek.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Link: https://patchwork.kernel.org/project/dri-devel/patch/20250923152340.18234-3-johan@kernel.org/
+Signed-off-by: Chun-Kuang Hu <chunkuang.hu@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/mediatek/mtk_ddp_comp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
++++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
+@@ -671,7 +671,7 @@ int mtk_ddp_comp_init(struct device *dev
+ type == MTK_DSI)
+ return 0;
+
+- priv = devm_kzalloc(comp->dev, sizeof(*priv), GFP_KERNEL);
++ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
--- /dev/null
+From 07c7c640a8eb9e196f357d15d88a59602a947197 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Tue, 23 Sep 2025 17:23:36 +0200
+Subject: drm/mediatek: Fix probe resource leaks
+
+From: Johan Hovold <johan@kernel.org>
+
+commit 07c7c640a8eb9e196f357d15d88a59602a947197 upstream.
+
+Make sure to unmap and release the component iomap and clock on probe
+failure (e.g. probe deferral) and on driver unbind.
+
+Note that unlike of_iomap(), devm_of_iomap() also checks whether the
+region is already mapped.
+
+Fixes: 119f5173628a ("drm/mediatek: Add DRM Driver for Mediatek SoC MT8173.")
+Cc: stable@vger.kernel.org # 4.7
+Cc: CK Hu <ck.hu@mediatek.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Link: https://patchwork.kernel.org/project/dri-devel/patch/20250923152340.18234-2-johan@kernel.org/
+Signed-off-by: Chun-Kuang Hu <chunkuang.hu@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/mediatek/mtk_ddp_comp.c | 20 ++++++++++++++++----
+ drivers/gpu/drm/mediatek/mtk_ddp_comp.h | 2 +-
+ drivers/gpu/drm/mediatek/mtk_drm_drv.c | 4 ++--
+ 3 files changed, 19 insertions(+), 7 deletions(-)
+
+--- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
++++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
+@@ -621,15 +621,20 @@ int mtk_find_possible_crtcs(struct drm_d
+ return ret;
+ }
+
+-int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp,
++static void mtk_ddp_comp_clk_put(void *_clk)
++{
++ struct clk *clk = _clk;
++
++ clk_put(clk);
++}
++
++int mtk_ddp_comp_init(struct device *dev, struct device_node *node, struct mtk_ddp_comp *comp,
+ unsigned int comp_id)
+ {
+ struct platform_device *comp_pdev;
+ enum mtk_ddp_comp_type type;
+ struct mtk_ddp_comp_dev *priv;
+-#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ int ret;
+-#endif
+
+ if (comp_id >= DDP_COMPONENT_DRM_ID_MAX)
+ return -EINVAL;
+@@ -670,11 +675,18 @@ int mtk_ddp_comp_init(struct device_node
+ if (!priv)
+ return -ENOMEM;
+
+- priv->regs = of_iomap(node, 0);
++ priv->regs = devm_of_iomap(dev, node, 0, NULL);
++ if (IS_ERR(priv->regs))
++ return PTR_ERR(priv->regs);
++
+ priv->clk = of_clk_get(node, 0);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
++ ret = devm_add_action_or_reset(dev, mtk_ddp_comp_clk_put, priv->clk);
++ if (ret)
++ return ret;
++
+ #if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ ret = cmdq_dev_get_client_reg(comp->dev, &priv->cmdq_reg, 0);
+ if (ret)
+--- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h
++++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h
+@@ -350,7 +350,7 @@ static inline void mtk_ddp_comp_encoder_
+ int mtk_ddp_comp_get_id(struct device_node *node,
+ enum mtk_ddp_comp_type comp_type);
+ int mtk_find_possible_crtcs(struct drm_device *drm, struct device *dev);
+-int mtk_ddp_comp_init(struct device_node *comp_node, struct mtk_ddp_comp *comp,
++int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node, struct mtk_ddp_comp *comp,
+ unsigned int comp_id);
+ enum mtk_ddp_comp_type mtk_ddp_comp_get_type(unsigned int comp_id);
+ void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value,
+--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+@@ -1123,7 +1123,7 @@ static int mtk_drm_probe(struct platform
+ (void *)private->mmsys_dev,
+ sizeof(*private->mmsys_dev));
+ private->ddp_comp[DDP_COMPONENT_DRM_OVL_ADAPTOR].dev = &ovl_adaptor->dev;
+- mtk_ddp_comp_init(NULL, &private->ddp_comp[DDP_COMPONENT_DRM_OVL_ADAPTOR],
++ mtk_ddp_comp_init(dev, NULL, &private->ddp_comp[DDP_COMPONENT_DRM_OVL_ADAPTOR],
+ DDP_COMPONENT_DRM_OVL_ADAPTOR);
+ component_match_add(dev, &match, compare_dev, &ovl_adaptor->dev);
+ }
+@@ -1189,7 +1189,7 @@ static int mtk_drm_probe(struct platform
+ node);
+ }
+
+- ret = mtk_ddp_comp_init(node, &private->ddp_comp[comp_id], comp_id);
++ ret = mtk_ddp_comp_init(dev, node, &private->ddp_comp[comp_id], comp_id);
+ if (ret) {
+ of_node_put(node);
+ goto err_node;
--- /dev/null
+From 9545bae5c8acd5a47af7add606718d94578bd838 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Tue, 23 Sep 2025 17:23:39 +0200
+Subject: drm/mediatek: mtk_hdmi: Fix probe device leaks
+
+From: Johan Hovold <johan@kernel.org>
+
+commit 9545bae5c8acd5a47af7add606718d94578bd838 upstream.
+
+Make sure to drop the references to the DDC adapter and CEC device
+taken during probe on probe failure (e.g. probe deferral) and on driver
+unbind.
+
+Fixes: 8f83f26891e1 ("drm/mediatek: Add HDMI support")
+Cc: stable@vger.kernel.org # 4.8
+Cc: Jie Qiu <jie.qiu@mediatek.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Link: https://patchwork.kernel.org/project/dri-devel/patch/20250923152340.18234-5-johan@kernel.org/
+Signed-off-by: Chun-Kuang Hu <chunkuang.hu@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/mediatek/mtk_hdmi.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
++++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
+@@ -1345,6 +1345,13 @@ static const struct drm_bridge_funcs mtk
+ .edid_read = mtk_hdmi_bridge_edid_read,
+ };
+
++static void mtk_hdmi_put_device(void *_dev)
++{
++ struct device *dev = _dev;
++
++ put_device(dev);
++}
++
+ static int mtk_hdmi_get_cec_dev(struct mtk_hdmi *hdmi, struct device *dev, struct device_node *np)
+ {
+ struct platform_device *cec_pdev;
+@@ -1369,6 +1376,10 @@ static int mtk_hdmi_get_cec_dev(struct m
+ }
+ of_node_put(cec_np);
+
++ ret = devm_add_action_or_reset(dev, mtk_hdmi_put_device, &cec_pdev->dev);
++ if (ret)
++ return ret;
++
+ /*
+ * The mediatek,syscon-hdmi property contains a phandle link to the
+ * MMSYS_CONFIG device and the register offset of the HDMI_SYS_CFG
+@@ -1423,6 +1434,10 @@ static int mtk_hdmi_dt_parse_pdata(struc
+ if (!hdmi->ddc_adpt)
+ return dev_err_probe(dev, -EINVAL, "Failed to get ddc i2c adapter by node\n");
+
++ ret = devm_add_action_or_reset(dev, mtk_hdmi_put_device, &hdmi->ddc_adpt->dev);
++ if (ret)
++ return ret;
++
+ ret = mtk_hdmi_get_cec_dev(hdmi, dev, np);
+ if (ret)
+ return ret;
--- /dev/null
+From e0f44f74ed6313e50b38eb39a2c7f210ae208db2 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Tue, 23 Sep 2025 17:23:40 +0200
+Subject: drm/mediatek: ovl_adaptor: Fix probe device leaks
+
+From: Johan Hovold <johan@kernel.org>
+
+commit e0f44f74ed6313e50b38eb39a2c7f210ae208db2 upstream.
+
+Make sure to drop the references taken to the component devices by
+of_find_device_by_node() during probe on probe failure (e.g. probe
+deferral) and on driver unbind.
+
+Fixes: 453c3364632a ("drm/mediatek: Add ovl_adaptor support for MT8195")
+Cc: stable@vger.kernel.org # 6.4
+Cc: Nancy.Lin <nancy.lin@mediatek.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Link: https://patchwork.kernel.org/project/dri-devel/patch/20250923152340.18234-6-johan@kernel.org/
+Signed-off-by: Chun-Kuang Hu <chunkuang.hu@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
++++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
+@@ -527,6 +527,13 @@ bool mtk_ovl_adaptor_is_comp_present(str
+ type == OVL_ADAPTOR_TYPE_PADDING;
+ }
+
++static void ovl_adaptor_put_device(void *_dev)
++{
++ struct device *dev = _dev;
++
++ put_device(dev);
++}
++
+ static int ovl_adaptor_comp_init(struct device *dev, struct component_match **match)
+ {
+ struct mtk_disp_ovl_adaptor *priv = dev_get_drvdata(dev);
+@@ -560,6 +567,11 @@ static int ovl_adaptor_comp_init(struct
+ if (!comp_pdev)
+ return -EPROBE_DEFER;
+
++ ret = devm_add_action_or_reset(dev, ovl_adaptor_put_device,
++ &comp_pdev->dev);
++ if (ret)
++ return ret;
++
+ priv->ovl_adaptor_comp[id] = &comp_pdev->dev;
+
+ drm_of_component_match_add(dev, match, component_compare_of, node);
--- /dev/null
+From 6cb31fba137d45e682ce455b8ea364f44d5d4f98 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Ren=C3=A9=20Rebe?= <rene@exactco.de>
+Date: Mon, 8 Dec 2025 14:18:27 +0100
+Subject: drm/mgag200: Fix big-endian support
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: René Rebe <rene@exactco.de>
+
+commit 6cb31fba137d45e682ce455b8ea364f44d5d4f98 upstream.
+
+Unlike the original, deleted Matrox mga driver, the new mgag200 driver
+has the XRGB frame buffer byte-swapped on big-endian "RISC"
+systems. Fix this by enabling the byte-swapping "PowerPC" OPMODE for
+any __BIG_ENDIAN config.
+
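+Reading the magic values against the mask in the hunk below (a hedged
+decode; the three two-bit OPMODE fields at bits 17:16, 9:8 and 3:2
+select the hardware swapper's data size):
+
+	opmode |= 0x10100;	/* fields = 1: swap 16-bit units (RGB565) */
+	opmode |= 0x20200;	/* fields = 2: swap 32-bit units (XRGB8888) */
+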
+Fixes: 414c45310625 ("mgag200: initial g200se driver (v2)")
+Signed-off-by: René Rebe <rene@exactco.de>
+Cc: stable@kernel.org
+Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Link: https://patch.msgid.link/20251208.141827.965103015954471168.rene@exactco.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/mgag200/mgag200_mode.c | 25 +++++++++++++++++++++++++
+ 1 file changed, 25 insertions(+)
+
+--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
++++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
+@@ -161,6 +161,30 @@ static void mgag200_set_startadd(struct
+ WREG_ECRT(0x00, crtcext0);
+ }
+
++/*
++ * Set the opmode for the hardware swapper for Big-Endian processor
++ * support for the frame buffer aperture and DMAWIN space.
++ */
++static void mgag200_set_datasiz(struct mga_device *mdev, u32 format)
++{
++#if defined(__BIG_ENDIAN)
++ u32 opmode = RREG32(MGAREG_OPMODE);
++
++ opmode &= ~(GENMASK(17, 16) | GENMASK(9, 8) | GENMASK(3, 2));
++
++ /* Big-endian byte-swapping */
++ switch (format) {
++ case DRM_FORMAT_RGB565:
++ opmode |= 0x10100;
++ break;
++ case DRM_FORMAT_XRGB8888:
++ opmode |= 0x20200;
++ break;
++ }
++ WREG32(MGAREG_OPMODE, opmode);
++#endif
++}
++
+ void mgag200_init_registers(struct mga_device *mdev)
+ {
+ u8 crtc11, misc;
+@@ -496,6 +520,7 @@ void mgag200_primary_plane_helper_atomic
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect damage;
+
++ mgag200_set_datasiz(mdev, fb->format->format);
+ drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ mgag200_handle_damage(mdev, shadow_plane_state->data, fb, &damage);
--- /dev/null
+From 779b68a5bf2764c8ed3aa800e41ba0d5d007e1e7 Mon Sep 17 00:00:00 2001
+From: Akhil P Oommen <akhilpo@oss.qualcomm.com>
+Date: Tue, 18 Nov 2025 14:20:28 +0530
+Subject: drm/msm/a6xx: Fix out of bound IO access in a6xx_get_gmu_registers
+
+From: Akhil P Oommen <akhilpo@oss.qualcomm.com>
+
+commit 779b68a5bf2764c8ed3aa800e41ba0d5d007e1e7 upstream.
+
+The REG_A6XX_GMU_AO_AHB_FENCE_CTRL register falls within the GMU's
+register range, so use the gmu_write() routines to write to it.
+
+Fixes: 1707add81551 ("drm/msm/a6xx: Add a6xx gpu state")
+Cc: stable@vger.kernel.org
+Signed-off-by: Akhil P Oommen <akhilpo@oss.qualcomm.com>
+Reviewed-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
+Patchwork: https://patchwork.freedesktop.org/patch/688993/
+Message-ID: <20251118-kaana-gpu-support-v4-1-86eeb8e93fb6@oss.qualcomm.com>
+Signed-off-by: Rob Clark <robin.clark@oss.qualcomm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+@@ -1255,7 +1255,7 @@ static void a6xx_get_gmu_registers(struc
+ return;
+
+ /* Set the fence to ALLOW mode so we can access the registers */
+- gpu_write(gpu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
++ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
+
+ _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[2],
+ &a6xx_state->gmu_registers[3], false);
--- /dev/null
+From 6c6915bfea212d32844b2b7f22bc1aa3669eabc4 Mon Sep 17 00:00:00 2001
+From: Anna Maniscalco <anna.maniscalco2000@gmail.com>
+Date: Thu, 27 Nov 2025 19:22:35 +0100
+Subject: drm/msm: add PERFCTR_CNTL to ifpc_reglist
+
+From: Anna Maniscalco <anna.maniscalco2000@gmail.com>
+
+commit 6c6915bfea212d32844b2b7f22bc1aa3669eabc4 upstream.
+
+Previously this register would become 0 after IFPC took place which
+broke all usages of counters.
+
+Fixes: a6a0157cc68e ("drm/msm/a6xx: Enable IFPC on Adreno X1-85")
+Cc: stable@vger.kernel.org
+Signed-off-by: Anna Maniscalco <anna.maniscalco2000@gmail.com>
+Reviewed-by: Akhil P Oommen <akhilpo@oss.qualcomm.com>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Patchwork: https://patchwork.freedesktop.org/patch/690960/
+Message-ID: <20251127-ifpc_counters-v3-1-fac0a126bc88@gmail.com>
+Signed-off-by: Rob Clark <robin.clark@oss.qualcomm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/msm/adreno/a6xx_catalog.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
+@@ -1360,6 +1360,7 @@ static const u32 a750_ifpc_reglist_regs[
+ REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(2),
+ REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(3),
+ REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(4),
++ REG_A6XX_RBBM_PERFCTR_CNTL,
+ REG_A6XX_TPL1_NC_MODE_CNTL,
+ REG_A6XX_SP_NC_MODE_CNTL,
+ REG_A6XX_CP_DBG_ECO_CNTL,
--- /dev/null
+From 88733a0b64872357e5ecd82b7488121503cb9cc6 Mon Sep 17 00:00:00 2001
+From: Nikolay Kuratov <kniv@yandex-team.ru>
+Date: Thu, 11 Dec 2025 12:36:30 +0300
+Subject: drm/msm/dpu: Add missing NULL pointer check for pingpong interface
+
+From: Nikolay Kuratov <kniv@yandex-team.ru>
+
+commit 88733a0b64872357e5ecd82b7488121503cb9cc6 upstream.
+
+It is checked almost always in dpu_encoder_phys_wb_setup_ctl(), but in a
+single place the check is missing.
+Also use convenient locals instead of phys_enc->* where available.
+
+Cc: stable@vger.kernel.org
+Fixes: d7d0e73f7de33 ("drm/msm/dpu: introduce the dpu_encoder_phys_* for writeback")
+Signed-off-by: Nikolay Kuratov <kniv@yandex-team.ru>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Patchwork: https://patchwork.freedesktop.org/patch/693860/
+Link: https://lore.kernel.org/r/20251211093630.171014-1-kniv@yandex-team.ru
+Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+@@ -247,14 +247,12 @@ static void dpu_encoder_phys_wb_setup_ct
+ if (hw_cdm)
+ intf_cfg.cdm = hw_cdm->idx;
+
+- if (phys_enc->hw_pp->merge_3d && phys_enc->hw_pp->merge_3d->ops.setup_3d_mode)
+- phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
+- mode_3d);
++ if (hw_pp && hw_pp->merge_3d && hw_pp->merge_3d->ops.setup_3d_mode)
++ hw_pp->merge_3d->ops.setup_3d_mode(hw_pp->merge_3d, mode_3d);
+
+ /* setup which pp blk will connect to this wb */
+- if (hw_pp && phys_enc->hw_wb->ops.bind_pingpong_blk)
+- phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb,
+- phys_enc->hw_pp->idx);
++ if (hw_pp && hw_wb->ops.bind_pingpong_blk)
++ hw_wb->ops.bind_pingpong_blk(hw_wb, hw_pp->idx);
+
+ phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
+ } else if (phys_enc->hw_ctl && phys_enc->hw_ctl->ops.setup_intf_cfg) {
--- /dev/null
+From 560271e10b2c86e95ea35afa9e79822e4847f07a Mon Sep 17 00:00:00 2001
+From: Lyude Paul <lyude@redhat.com>
+Date: Thu, 11 Dec 2025 14:02:54 -0500
+Subject: drm/nouveau/dispnv50: Don't call drm_atomic_get_crtc_state() in prepare_fb
+
+From: Lyude Paul <lyude@redhat.com>
+
+commit 560271e10b2c86e95ea35afa9e79822e4847f07a upstream.
+
+Since we recently started warning about uses of this function after the
+atomic check phase completes, we've started getting warnings about this in
+nouveau. It appears a misplaced drm_atomic_get_crtc_state() call has been
+hiding in our .prepare_fb callback for a while.
+
+So, fix this by adding a new nv50_head_atom_get_new() function and using
+that in our .prepare_fb callback instead.
+
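+For reference, the two core helpers differ in exactly the way that
+matters here (sketch):
+
+	/* may allocate and add CRTC state; only valid during atomic check */
+	statec = drm_atomic_get_crtc_state(state, crtc);
+
+	/* pure lookup of already-committed state; NULL if not in @state */
+	statec = drm_atomic_get_new_crtc_state(state, crtc);
+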
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Reviewed-by: Dave Airlie <airlied@redhat.com>
+Fixes: 1590700d94ac ("drm/nouveau/kms/nv50-: split each resource type into their own source files")
+Cc: <stable@vger.kernel.org> # v4.18+
+Link: https://patch.msgid.link/20251211190256.396742-1-lyude@redhat.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/nouveau/dispnv50/atom.h | 13 +++++++++++++
+ drivers/gpu/drm/nouveau/dispnv50/wndw.c | 2 +-
+ 2 files changed, 14 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/nouveau/dispnv50/atom.h
++++ b/drivers/gpu/drm/nouveau/dispnv50/atom.h
+@@ -152,8 +152,21 @@ static inline struct nv50_head_atom *
+ nv50_head_atom_get(struct drm_atomic_state *state, struct drm_crtc *crtc)
+ {
+ struct drm_crtc_state *statec = drm_atomic_get_crtc_state(state, crtc);
++
+ if (IS_ERR(statec))
+ return (void *)statec;
++
++ return nv50_head_atom(statec);
++}
++
++static inline struct nv50_head_atom *
++nv50_head_atom_get_new(struct drm_atomic_state *state, struct drm_crtc *crtc)
++{
++ struct drm_crtc_state *statec = drm_atomic_get_new_crtc_state(state, crtc);
++
++ if (!statec)
++ return NULL;
++
+ return nv50_head_atom(statec);
+ }
+
+--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+@@ -583,7 +583,7 @@ nv50_wndw_prepare_fb(struct drm_plane *p
+ asyw->image.offset[0] = nvbo->offset;
+
+ if (wndw->func->prepare) {
+- asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
++ asyh = nv50_head_atom_get_new(asyw->state.state, asyw->state.crtc);
+ if (IS_ERR(asyh))
+ return PTR_ERR(asyh);
+
--- /dev/null
+From da67179e5538b473a47c87e87cb35b1a7551ad9b Mon Sep 17 00:00:00 2001
+From: Lyude Paul <lyude@redhat.com>
+Date: Tue, 2 Dec 2025 12:59:12 -0500
+Subject: drm/nouveau/gsp: Allocate fwsec-sb at boot
+
+From: Lyude Paul <lyude@redhat.com>
+
+commit da67179e5538b473a47c87e87cb35b1a7551ad9b upstream.
+
+At the moment, the memory allocation for fwsec-sb is created as needed and
+is released after being used. Typically this is at some point well after
+driver load, which can cause runtime suspend/resume to initially work on
+driver load but then later fail on a machine that has been running long
+enough under sufficiently high memory pressure:
+
+ kworker/7:1: page allocation failure: order:5, mode:0xcc0(GFP_KERNEL),
+ nodemask=(null),cpuset=/,mems_allowed=0
+ CPU: 7 UID: 0 PID: 875159 Comm: kworker/7:1 Not tainted
+ 6.17.8-300.fc43.x86_64 #1 PREEMPT(lazy)
+ Hardware name: SLIMBOOK Executive/Executive, BIOS N.1.10GRU06 02/02/2024
+ Workqueue: pm pm_runtime_work
+ Call Trace:
+ <TASK>
+ dump_stack_lvl+0x5d/0x80
+ warn_alloc+0x163/0x190
+ ? __alloc_pages_direct_compact+0x1b3/0x220
+ __alloc_pages_slowpath.constprop.0+0x57a/0xb10
+ __alloc_frozen_pages_noprof+0x334/0x350
+ __alloc_pages_noprof+0xe/0x20
+ __dma_direct_alloc_pages.isra.0+0x1eb/0x330
+ dma_direct_alloc_pages+0x3c/0x190
+ dma_alloc_pages+0x29/0x130
+ nvkm_firmware_ctor+0x1ae/0x280 [nouveau]
+ nvkm_falcon_fw_ctor+0x3e/0x60 [nouveau]
+ nvkm_gsp_fwsec+0x10e/0x2c0 [nouveau]
+ ? sysvec_apic_timer_interrupt+0xe/0x90
+ nvkm_gsp_fwsec_sb+0x27/0x70 [nouveau]
+ tu102_gsp_fini+0x65/0x110 [nouveau]
+ ? ktime_get+0x3c/0xf0
+ nvkm_subdev_fini+0x67/0xc0 [nouveau]
+ nvkm_device_fini+0x94/0x140 [nouveau]
+ nvkm_udevice_fini+0x50/0x70 [nouveau]
+ nvkm_object_fini+0xb1/0x140 [nouveau]
+ nvkm_object_fini+0x70/0x140 [nouveau]
+ ? __pfx_pci_pm_runtime_suspend+0x10/0x10
+ nouveau_do_suspend+0xe4/0x170 [nouveau]
+ nouveau_pmops_runtime_suspend+0x3e/0xb0 [nouveau]
+ pci_pm_runtime_suspend+0x67/0x1a0
+ ? __pfx_pci_pm_runtime_suspend+0x10/0x10
+ __rpm_callback+0x45/0x1f0
+ ? __pfx_pci_pm_runtime_suspend+0x10/0x10
+ rpm_callback+0x6d/0x80
+ rpm_suspend+0xe5/0x5e0
+ ? finish_task_switch.isra.0+0x99/0x2c0
+ pm_runtime_work+0x98/0xb0
+ process_one_work+0x18f/0x350
+ worker_thread+0x25a/0x3a0
+ ? __pfx_worker_thread+0x10/0x10
+ kthread+0xf9/0x240
+ ? __pfx_kthread+0x10/0x10
+ ? __pfx_kthread+0x10/0x10
+ ret_from_fork+0xf1/0x110
+ ? __pfx_kthread+0x10/0x10
+ ret_from_fork_asm+0x1a/0x30
+ </TASK>
+
+This happens because the fwsec-sb firmware image only supports being
+booted from a contiguous coherent sysmem allocation (the order:5 request
+in the trace above is 2^5 = 32 contiguous pages, i.e. 128 KiB with 4 KiB
+pages). If a system runs into enough memory fragmentation from memory
+pressure, such as what can happen on systems with low amounts of memory,
+it can later become impossible to find space for a large enough
+contiguous allocation to hold fwsec-sb. We then fail to boot the
+firmware image, the GPU fails to boot, and the driver fails.
+
+Since this firmware can't use non-contiguous allocations, the best solution
+to avoid this issue is to simply allocate the memory for fwsec-sb during
+initial driver load, and reuse that allocation whenever fwsec-sb needs to
+be used. We then release the memory allocation on driver unload.
+
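+The resulting lifecycle is roughly (sketch of the new split):
+
+	/* driver load: reserve the contiguous buffer while memory is fresh */
+	nvkm_gsp_fwsec_sb_ctor(gsp);
+	/* suspend path: boot fwsec-sb, reusing the kept allocation */
+	nvkm_gsp_fwsec_sb(gsp);
+	/* driver unload: release it */
+	nvkm_gsp_fwsec_sb_dtor(gsp);
+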
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Fixes: 594766ca3e53 ("drm/nouveau/gsp: move booter handling to GPU-specific code")
+Cc: <stable@vger.kernel.org> # v6.16+
+Reviewed-by: Timur Tabi <ttabi@nvidia.com>
+Link: https://patch.msgid.link/20251202175918.63533-1-lyude@redhat.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ .../gpu/drm/nouveau/include/nvkm/subdev/gsp.h | 4 ++
+ .../gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c | 61 +++++++++++++------
+ .../gpu/drm/nouveau/nvkm/subdev/gsp/priv.h | 3 +
+ .../drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c | 10 ++-
+ 4 files changed, 58 insertions(+), 20 deletions(-)
+
+diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+index 226c7ec56b8e..b8b97e10ae83 100644
+--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
++++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+@@ -73,6 +73,10 @@ struct nvkm_gsp {
+
+ const struct firmware *bl;
+ const struct firmware *rm;
++
++ struct {
++ struct nvkm_falcon_fw sb;
++ } falcon;
+ } fws;
+
+ struct nvkm_firmware fw;
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
+index 5b721bd9d799..503760246660 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
+@@ -259,18 +259,16 @@ nvkm_gsp_fwsec_v3(struct nvkm_gsp *gsp, const char *name,
+ }
+
+ static int
+-nvkm_gsp_fwsec(struct nvkm_gsp *gsp, const char *name, u32 init_cmd)
++nvkm_gsp_fwsec_init(struct nvkm_gsp *gsp, struct nvkm_falcon_fw *fw, const char *name, u32 init_cmd)
+ {
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_bios *bios = device->bios;
+ const union nvfw_falcon_ucode_desc *desc;
+ struct nvbios_pmuE flcn_ucode;
+- u8 idx, ver, hdr;
+ u32 data;
+ u16 size, vers;
+- struct nvkm_falcon_fw fw = {};
+- u32 mbox0 = 0;
++ u8 idx, ver, hdr;
+ int ret;
+
+ /* Lookup in VBIOS. */
+@@ -291,8 +289,8 @@ nvkm_gsp_fwsec(struct nvkm_gsp *gsp, const char *name, u32 init_cmd)
+ vers = (desc->v2.Hdr & 0x0000ff00) >> 8;
+
+ switch (vers) {
+- case 2: ret = nvkm_gsp_fwsec_v2(gsp, name, &desc->v2, size, init_cmd, &fw); break;
+- case 3: ret = nvkm_gsp_fwsec_v3(gsp, name, &desc->v3, size, init_cmd, &fw); break;
++ case 2: ret = nvkm_gsp_fwsec_v2(gsp, name, &desc->v2, size, init_cmd, fw); break;
++ case 3: ret = nvkm_gsp_fwsec_v3(gsp, name, &desc->v3, size, init_cmd, fw); break;
+ default:
+ nvkm_error(subdev, "%s(v%d): version unknown\n", name, vers);
+ return -EINVAL;
+@@ -303,15 +301,19 @@ nvkm_gsp_fwsec(struct nvkm_gsp *gsp, const char *name, u32 init_cmd)
+ return ret;
+ }
+
+- /* Boot. */
+- ret = nvkm_falcon_fw_boot(&fw, subdev, true, &mbox0, NULL, 0, 0);
+- nvkm_falcon_fw_dtor(&fw);
+- if (ret)
+- return ret;
+-
+ return 0;
+ }
+
++static int
++nvkm_gsp_fwsec_boot(struct nvkm_gsp *gsp, struct nvkm_falcon_fw *fw)
++{
++ struct nvkm_subdev *subdev = &gsp->subdev;
++ u32 mbox0 = 0;
++
++ /* Boot */
++ return nvkm_falcon_fw_boot(fw, subdev, true, &mbox0, NULL, 0, 0);
++}
++
+ int
+ nvkm_gsp_fwsec_sb(struct nvkm_gsp *gsp)
+ {
+@@ -320,7 +322,7 @@ nvkm_gsp_fwsec_sb(struct nvkm_gsp *gsp)
+ int ret;
+ u32 err;
+
+- ret = nvkm_gsp_fwsec(gsp, "fwsec-sb", NVFW_FALCON_APPIF_DMEMMAPPER_CMD_SB);
++ ret = nvkm_gsp_fwsec_boot(gsp, &gsp->fws.falcon.sb);
+ if (ret)
+ return ret;
+
+@@ -334,27 +336,48 @@ nvkm_gsp_fwsec_sb(struct nvkm_gsp *gsp)
+ return 0;
+ }
+
++int
++nvkm_gsp_fwsec_sb_ctor(struct nvkm_gsp *gsp)
++{
++ return nvkm_gsp_fwsec_init(gsp, &gsp->fws.falcon.sb, "fwsec-sb",
++ NVFW_FALCON_APPIF_DMEMMAPPER_CMD_SB);
++}
++
++void
++nvkm_gsp_fwsec_sb_dtor(struct nvkm_gsp *gsp)
++{
++ nvkm_falcon_fw_dtor(&gsp->fws.falcon.sb);
++}
++
+ int
+ nvkm_gsp_fwsec_frts(struct nvkm_gsp *gsp)
+ {
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvkm_device *device = subdev->device;
++ struct nvkm_falcon_fw fw = {};
+ int ret;
+ u32 err, wpr2_lo, wpr2_hi;
+
+- ret = nvkm_gsp_fwsec(gsp, "fwsec-frts", NVFW_FALCON_APPIF_DMEMMAPPER_CMD_FRTS);
++ ret = nvkm_gsp_fwsec_init(gsp, &fw, "fwsec-frts", NVFW_FALCON_APPIF_DMEMMAPPER_CMD_FRTS);
+ if (ret)
+ return ret;
+
++ ret = nvkm_gsp_fwsec_boot(gsp, &fw);
++ if (ret)
++ goto fwsec_dtor;
++
+ /* Verify. */
+ err = nvkm_rd32(device, 0x001400 + (0xe * 4)) >> 16;
+ if (err) {
+ nvkm_error(subdev, "fwsec-frts: 0x%04x\n", err);
+- return -EIO;
++ ret = -EIO;
++ } else {
++ wpr2_lo = nvkm_rd32(device, 0x1fa824);
++ wpr2_hi = nvkm_rd32(device, 0x1fa828);
++ nvkm_debug(subdev, "fwsec-frts: WPR2 @ %08x - %08x\n", wpr2_lo, wpr2_hi);
+ }
+
+- wpr2_lo = nvkm_rd32(device, 0x1fa824);
+- wpr2_hi = nvkm_rd32(device, 0x1fa828);
+- nvkm_debug(subdev, "fwsec-frts: WPR2 @ %08x - %08x\n", wpr2_lo, wpr2_hi);
+- return 0;
++fwsec_dtor:
++ nvkm_falcon_fw_dtor(&fw);
++ return ret;
+ }
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
+index c3494b7ac572..86bdd203bc10 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
+@@ -6,7 +6,10 @@
+ enum nvkm_acr_lsf_id;
+
+ int nvkm_gsp_fwsec_frts(struct nvkm_gsp *);
++
++int nvkm_gsp_fwsec_sb_ctor(struct nvkm_gsp *);
+ int nvkm_gsp_fwsec_sb(struct nvkm_gsp *);
++void nvkm_gsp_fwsec_sb_dtor(struct nvkm_gsp *);
+
+ struct nvkm_gsp_fwif {
+ int version;
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
+index 32e6a065d6d7..2a7e80c6d70f 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
+@@ -1817,12 +1817,16 @@ r535_gsp_rm_boot_ctor(struct nvkm_gsp *gsp)
+ RM_RISCV_UCODE_DESC *desc;
+ int ret;
+
++ ret = nvkm_gsp_fwsec_sb_ctor(gsp);
++ if (ret)
++ return ret;
++
+ hdr = nvfw_bin_hdr(&gsp->subdev, fw->data);
+ desc = (void *)fw->data + hdr->header_offset;
+
+ ret = nvkm_gsp_mem_ctor(gsp, hdr->data_size, &gsp->boot.fw);
+ if (ret)
+- return ret;
++ goto dtor_fwsec;
+
+ memcpy(gsp->boot.fw.data, fw->data + hdr->data_offset, hdr->data_size);
+
+@@ -1831,6 +1835,9 @@ r535_gsp_rm_boot_ctor(struct nvkm_gsp *gsp)
+ gsp->boot.manifest_offset = desc->manifestOffset;
+ gsp->boot.app_version = desc->appVersion;
+ return 0;
++dtor_fwsec:
++ nvkm_gsp_fwsec_sb_dtor(gsp);
++ return ret;
+ }
+
+ static const struct nvkm_firmware_func
+@@ -2101,6 +2108,7 @@ r535_gsp_dtor(struct nvkm_gsp *gsp)
+ mutex_destroy(&gsp->cmdq.mutex);
+
+ nvkm_gsp_dtor_fws(gsp);
++ nvkm_gsp_fwsec_sb_dtor(gsp);
+
+ nvkm_gsp_mem_dtor(&gsp->rmargs);
+ nvkm_gsp_mem_dtor(&gsp->wpr_meta);
+--
+2.52.0
+
--- /dev/null
+From 754c23238438600e9236719f7e67aff2c4d02093 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Thomas=20Hellstr=C3=B6m?= <thomas.hellstrom@linux.intel.com>
+Date: Fri, 19 Dec 2025 12:32:59 +0100
+Subject: drm/pagemap, drm/xe: Ensure that the devmem allocation is idle before use
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+
+commit 754c23238438600e9236719f7e67aff2c4d02093 upstream.
+
+In situations where no system memory is migrated to devmem, and in
+upcoming patches where another GPU is performing the migration to
+the newly allocated devmem buffer, there is nothing to ensure that an
+ongoing clear of the devmem allocation, or an async eviction from it,
+has completed.
+
+Address that by passing a struct dma_fence down to the copy
+functions, and ensuring it is waited for before migration is marked
+complete.
+
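+The core of the scheme, condensed from the xe_migrate_vram() hunk below
+(sketch): pipeline the fence as a scheduler dependency when possible,
+and fall back to a synchronous wait if that fails:
+
+	if (deps && !dma_fence_is_signaled(deps)) {
+		dma_fence_get(deps);	/* ref consumed by the scheduler */
+		if (drm_sched_job_add_dependency(&job->drm, deps))
+			dma_fence_wait(deps, false);	/* fallback: block */
+	}
+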
+v3:
+- New patch.
+v4:
+- Update the logic used for determining when to wait for the
+ pre_migrate_fence.
+- Update the logic used for determining when to warn for the
+ pre_migrate_fence since the scheduler fences apparently
+ can signal out-of-order.
+v5:
+- Fix a UAF (CI)
+- Remove references to source P2P migration (Himal)
+- Put the pre_migrate_fence after migration.
+v6:
+- Pipeline the pre_migrate_fence dependency (Matt Brost)
+
+Fixes: c5b3eb5a906c ("drm/xe: Add GPUSVM device memory copy vfunc functions")
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: <stable@vger.kernel.org> # v6.15+
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+Acked-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> # For merging through drm-xe.
+Link: https://patch.msgid.link/20251219113320.183860-4-thomas.hellstrom@linux.intel.com
+(cherry picked from commit 16b5ad31952476fb925c401897fc171cd37f536b)
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/drm_pagemap.c | 17 ++++++++++---
+ drivers/gpu/drm/xe/xe_migrate.c | 25 ++++++++++++++++----
+ drivers/gpu/drm/xe/xe_migrate.h | 6 +++-
+ drivers/gpu/drm/xe/xe_svm.c | 49 ++++++++++++++++++++++++++++++----------
+ include/drm/drm_pagemap.h | 17 +++++++++++--
+ 5 files changed, 88 insertions(+), 26 deletions(-)
+
+--- a/drivers/gpu/drm/drm_pagemap.c
++++ b/drivers/gpu/drm/drm_pagemap.c
+@@ -3,6 +3,7 @@
+ * Copyright © 2024-2025 Intel Corporation
+ */
+
++#include <linux/dma-fence.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/migrate.h>
+ #include <linux/pagemap.h>
+@@ -408,10 +409,14 @@ int drm_pagemap_migrate_to_devmem(struct
+ drm_pagemap_get_devmem_page(page, zdd);
+ }
+
+- err = ops->copy_to_devmem(pages, pagemap_addr, npages);
++ err = ops->copy_to_devmem(pages, pagemap_addr, npages,
++ devmem_allocation->pre_migrate_fence);
+ if (err)
+ goto err_finalize;
+
++ dma_fence_put(devmem_allocation->pre_migrate_fence);
++ devmem_allocation->pre_migrate_fence = NULL;
++
+ /* Upon success bind devmem allocation to range and zdd */
+ devmem_allocation->timeslice_expiration = get_jiffies_64() +
+ msecs_to_jiffies(timeslice_ms);
+@@ -596,7 +601,7 @@ retry:
+ for (i = 0; i < npages; ++i)
+ pages[i] = migrate_pfn_to_page(src[i]);
+
+- err = ops->copy_to_ram(pages, pagemap_addr, npages);
++ err = ops->copy_to_ram(pages, pagemap_addr, npages, NULL);
+ if (err)
+ goto err_finalize;
+
+@@ -732,7 +737,7 @@ static int __drm_pagemap_migrate_to_ram(
+ for (i = 0; i < npages; ++i)
+ pages[i] = migrate_pfn_to_page(migrate.src[i]);
+
+- err = ops->copy_to_ram(pages, pagemap_addr, npages);
++ err = ops->copy_to_ram(pages, pagemap_addr, npages, NULL);
+ if (err)
+ goto err_finalize;
+
+@@ -813,11 +818,14 @@ EXPORT_SYMBOL_GPL(drm_pagemap_pagemap_op
+ * @ops: Pointer to the operations structure for GPU SVM device memory
+ * @dpagemap: The struct drm_pagemap we're allocating from.
+ * @size: Size of device memory allocation
++ * @pre_migrate_fence: Fence to wait for or pipeline behind before migration starts.
++ * (May be NULL).
+ */
+ void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
+ struct device *dev, struct mm_struct *mm,
+ const struct drm_pagemap_devmem_ops *ops,
+- struct drm_pagemap *dpagemap, size_t size)
++ struct drm_pagemap *dpagemap, size_t size,
++ struct dma_fence *pre_migrate_fence)
+ {
+ init_completion(&devmem_allocation->detached);
+ devmem_allocation->dev = dev;
+@@ -825,6 +833,7 @@ void drm_pagemap_devmem_init(struct drm_
+ devmem_allocation->ops = ops;
+ devmem_allocation->dpagemap = dpagemap;
+ devmem_allocation->size = size;
++ devmem_allocation->pre_migrate_fence = pre_migrate_fence;
+ }
+ EXPORT_SYMBOL_GPL(drm_pagemap_devmem_init);
+
+--- a/drivers/gpu/drm/xe/xe_migrate.c
++++ b/drivers/gpu/drm/xe/xe_migrate.c
+@@ -1813,6 +1813,7 @@ static struct dma_fence *xe_migrate_vram
+ unsigned long sram_offset,
+ struct drm_pagemap_addr *sram_addr,
+ u64 vram_addr,
++ struct dma_fence *deps,
+ const enum xe_migrate_copy_dir dir)
+ {
+ struct xe_gt *gt = m->tile->primary_gt;
+@@ -1890,6 +1891,14 @@ static struct dma_fence *xe_migrate_vram
+
+ xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
+
++ if (deps && !dma_fence_is_signaled(deps)) {
++ dma_fence_get(deps);
++ err = drm_sched_job_add_dependency(&job->drm, deps);
++ if (err)
++ dma_fence_wait(deps, false);
++ err = 0;
++ }
++
+ mutex_lock(&m->job_mutex);
+ xe_sched_job_arm(job);
+ fence = dma_fence_get(&job->drm.s_fence->finished);
+@@ -1915,6 +1924,8 @@ err:
+ * @npages: Number of pages to migrate.
+ * @src_addr: Array of DMA information (source of migrate)
+ * @dst_addr: Device physical address of VRAM (destination of migrate)
++ * @deps: struct dma_fence representing the dependencies that need
++ * to be signaled before migration.
+ *
+ * Copy from an array dma addresses to a VRAM device physical address
+ *
+@@ -1924,10 +1935,11 @@ err:
+ struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
+ unsigned long npages,
+ struct drm_pagemap_addr *src_addr,
+- u64 dst_addr)
++ u64 dst_addr,
++ struct dma_fence *deps)
+ {
+ return xe_migrate_vram(m, npages * PAGE_SIZE, 0, src_addr, dst_addr,
+- XE_MIGRATE_COPY_TO_VRAM);
++ deps, XE_MIGRATE_COPY_TO_VRAM);
+ }
+
+ /**
+@@ -1936,6 +1948,8 @@ struct dma_fence *xe_migrate_to_vram(str
+ * @npages: Number of pages to migrate.
+ * @src_addr: Device physical address of VRAM (source of migrate)
+ * @dst_addr: Array of DMA information (destination of migrate)
++ * @deps: struct dma_fence representing the dependencies that need
++ * to be signaled before migration.
+ *
+ * Copy from a VRAM device physical address to an array dma addresses
+ *
+@@ -1945,10 +1959,11 @@ struct dma_fence *xe_migrate_to_vram(str
+ struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
+ unsigned long npages,
+ u64 src_addr,
+- struct drm_pagemap_addr *dst_addr)
++ struct drm_pagemap_addr *dst_addr,
++ struct dma_fence *deps)
+ {
+ return xe_migrate_vram(m, npages * PAGE_SIZE, 0, dst_addr, src_addr,
+- XE_MIGRATE_COPY_TO_SRAM);
++ deps, XE_MIGRATE_COPY_TO_SRAM);
+ }
+
+ static void xe_migrate_dma_unmap(struct xe_device *xe,
+@@ -2121,7 +2136,7 @@ int xe_migrate_access_memory(struct xe_m
+ __fence = xe_migrate_vram(m, current_bytes,
+ (unsigned long)buf & ~PAGE_MASK,
+ &pagemap_addr[current_page],
+- vram_addr, write ?
++ vram_addr, NULL, write ?
+ XE_MIGRATE_COPY_TO_VRAM :
+ XE_MIGRATE_COPY_TO_SRAM);
+ if (IS_ERR(__fence)) {
+--- a/drivers/gpu/drm/xe/xe_migrate.h
++++ b/drivers/gpu/drm/xe/xe_migrate.h
+@@ -111,12 +111,14 @@ int xe_migrate_init(struct xe_migrate *m
+ struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
+ unsigned long npages,
+ struct drm_pagemap_addr *src_addr,
+- u64 dst_addr);
++ u64 dst_addr,
++ struct dma_fence *deps);
+
+ struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
+ unsigned long npages,
+ u64 src_addr,
+- struct drm_pagemap_addr *dst_addr);
++ struct drm_pagemap_addr *dst_addr,
++ struct dma_fence *deps);
+
+ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
+ struct xe_bo *src_bo,
+--- a/drivers/gpu/drm/xe/xe_svm.c
++++ b/drivers/gpu/drm/xe/xe_svm.c
+@@ -477,7 +477,8 @@ static void xe_svm_copy_us_stats_incr(st
+
+ static int xe_svm_copy(struct page **pages,
+ struct drm_pagemap_addr *pagemap_addr,
+- unsigned long npages, const enum xe_svm_copy_dir dir)
++ unsigned long npages, const enum xe_svm_copy_dir dir,
++ struct dma_fence *pre_migrate_fence)
+ {
+ struct xe_vram_region *vr = NULL;
+ struct xe_gt *gt = NULL;
+@@ -566,7 +567,8 @@ static int xe_svm_copy(struct page **pag
+ __fence = xe_migrate_from_vram(vr->migrate,
+ i - pos + incr,
+ vram_addr,
+- &pagemap_addr[pos]);
++ &pagemap_addr[pos],
++ pre_migrate_fence);
+ } else {
+ vm_dbg(&xe->drm,
+ "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
+@@ -575,13 +577,14 @@ static int xe_svm_copy(struct page **pag
+ __fence = xe_migrate_to_vram(vr->migrate,
+ i - pos + incr,
+ &pagemap_addr[pos],
+- vram_addr);
++ vram_addr,
++ pre_migrate_fence);
+ }
+ if (IS_ERR(__fence)) {
+ err = PTR_ERR(__fence);
+ goto err_out;
+ }
+-
++ pre_migrate_fence = NULL;
+ dma_fence_put(fence);
+ fence = __fence;
+ }
+@@ -604,20 +607,22 @@ static int xe_svm_copy(struct page **pag
+ vram_addr, (u64)pagemap_addr[pos].addr, 1);
+ __fence = xe_migrate_from_vram(vr->migrate, 1,
+ vram_addr,
+- &pagemap_addr[pos]);
++ &pagemap_addr[pos],
++ pre_migrate_fence);
+ } else {
+ vm_dbg(&xe->drm,
+ "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
+ (u64)pagemap_addr[pos].addr, vram_addr, 1);
+ __fence = xe_migrate_to_vram(vr->migrate, 1,
+ &pagemap_addr[pos],
+- vram_addr);
++ vram_addr,
++ pre_migrate_fence);
+ }
+ if (IS_ERR(__fence)) {
+ err = PTR_ERR(__fence);
+ goto err_out;
+ }
+-
++ pre_migrate_fence = NULL;
+ dma_fence_put(fence);
+ fence = __fence;
+ }
+@@ -630,6 +635,8 @@ err_out:
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+ }
++ if (pre_migrate_fence)
++ dma_fence_wait(pre_migrate_fence, false);
+
+ /*
+ * XXX: We can't derive the GT here (or anywhere in this functions, but
+@@ -646,16 +653,20 @@ err_out:
+
+ static int xe_svm_copy_to_devmem(struct page **pages,
+ struct drm_pagemap_addr *pagemap_addr,
+- unsigned long npages)
++ unsigned long npages,
++ struct dma_fence *pre_migrate_fence)
+ {
+- return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_VRAM);
++ return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_VRAM,
++ pre_migrate_fence);
+ }
+
+ static int xe_svm_copy_to_ram(struct page **pages,
+ struct drm_pagemap_addr *pagemap_addr,
+- unsigned long npages)
++ unsigned long npages,
++ struct dma_fence *pre_migrate_fence)
+ {
+- return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_SRAM);
++ return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_SRAM,
++ pre_migrate_fence);
+ }
+
+ static struct xe_bo *to_xe_bo(struct drm_pagemap_devmem *devmem_allocation)
+@@ -668,6 +679,7 @@ static void xe_svm_devmem_release(struct
+ struct xe_bo *bo = to_xe_bo(devmem_allocation);
+ struct xe_device *xe = xe_bo_device(bo);
+
++ dma_fence_put(devmem_allocation->pre_migrate_fence);
+ xe_bo_put_async(bo);
+ xe_pm_runtime_put(xe);
+ }
+@@ -862,6 +874,7 @@ static int xe_drm_pagemap_populate_mm(st
+ unsigned long timeslice_ms)
+ {
+ struct xe_vram_region *vr = container_of(dpagemap, typeof(*vr), dpagemap);
++ struct dma_fence *pre_migrate_fence = NULL;
+ struct xe_device *xe = vr->xe;
+ struct device *dev = xe->drm.dev;
+ struct drm_buddy_block *block;
+@@ -888,8 +901,20 @@ static int xe_drm_pagemap_populate_mm(st
+ break;
+ }
+
++ /* Ensure that any clearing or async eviction will complete before migration. */
++ if (!dma_resv_test_signaled(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL)) {
++ err = dma_resv_get_singleton(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
++ &pre_migrate_fence);
++ if (err)
++ dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
++ false, MAX_SCHEDULE_TIMEOUT);
++ else if (pre_migrate_fence)
++ dma_fence_enable_sw_signaling(pre_migrate_fence);
++ }
++
+ drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
+- &dpagemap_devmem_ops, dpagemap, end - start);
++ &dpagemap_devmem_ops, dpagemap, end - start,
++ pre_migrate_fence);
+
+ blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
+ list_for_each_entry(block, blocks, link)
+--- a/include/drm/drm_pagemap.h
++++ b/include/drm/drm_pagemap.h
+@@ -8,6 +8,7 @@
+
+ #define NR_PAGES(order) (1U << (order))
+
++struct dma_fence;
+ struct drm_pagemap;
+ struct drm_pagemap_zdd;
+ struct device;
+@@ -174,6 +175,8 @@ struct drm_pagemap_devmem_ops {
+ * @pages: Pointer to array of device memory pages (destination)
+ * @pagemap_addr: Pointer to array of DMA information (source)
+ * @npages: Number of pages to copy
++ * @pre_migrate_fence: dma-fence to wait for before migration starts.
++ * May be NULL.
+ *
+ * Copy pages to device memory. If the order of a @pagemap_addr entry
+ * is greater than 0, the entry is populated but subsequent entries
+@@ -183,13 +186,16 @@ struct drm_pagemap_devmem_ops {
+ */
+ int (*copy_to_devmem)(struct page **pages,
+ struct drm_pagemap_addr *pagemap_addr,
+- unsigned long npages);
++ unsigned long npages,
++ struct dma_fence *pre_migrate_fence);
+
+ /**
+ * @copy_to_ram: Copy to system RAM (required for migration)
+ * @pages: Pointer to array of device memory pages (source)
+ * @pagemap_addr: Pointer to array of DMA information (destination)
+ * @npages: Number of pages to copy
++ * @pre_migrate_fence: dma-fence to wait for before migration starts.
++ * May be NULL.
+ *
+ * Copy pages to system RAM. If the order of a @pagemap_addr entry
+ * is greater than 0, the entry is populated but subsequent entries
+@@ -199,7 +205,8 @@ struct drm_pagemap_devmem_ops {
+ */
+ int (*copy_to_ram)(struct page **pages,
+ struct drm_pagemap_addr *pagemap_addr,
+- unsigned long npages);
++ unsigned long npages,
++ struct dma_fence *pre_migrate_fence);
+ };
+
+ /**
+@@ -212,6 +219,8 @@ struct drm_pagemap_devmem_ops {
+ * @dpagemap: The struct drm_pagemap of the pages this allocation belongs to.
+ * @size: Size of device memory allocation
+ * @timeslice_expiration: Timeslice expiration in jiffies
++ * @pre_migrate_fence: Fence to wait for or pipeline behind before migration starts.
++ * (May be NULL).
+ */
+ struct drm_pagemap_devmem {
+ struct device *dev;
+@@ -221,6 +230,7 @@ struct drm_pagemap_devmem {
+ struct drm_pagemap *dpagemap;
+ size_t size;
+ u64 timeslice_expiration;
++ struct dma_fence *pre_migrate_fence;
+ };
+
+ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
+@@ -238,7 +248,8 @@ struct drm_pagemap *drm_pagemap_page_to_
+ void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
+ struct device *dev, struct mm_struct *mm,
+ const struct drm_pagemap_devmem_ops *ops,
+- struct drm_pagemap *dpagemap, size_t size);
++ struct drm_pagemap *dpagemap, size_t size,
++ struct dma_fence *pre_migrate_fence);
+
+ int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
+ unsigned long start, unsigned long end,
--- /dev/null
+From 7d7bb790aced3b1b8550b74e02fdfc001d044bee Mon Sep 17 00:00:00 2001
+From: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+Date: Wed, 22 Oct 2025 19:19:48 +0300
+Subject: drm/rockchip: Set VOP for the DRM DMA device
+
+From: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+
+commit 7d7bb790aced3b1b8550b74e02fdfc001d044bee upstream.
+
+Use the VOP for DMA operations performed by the DRM core. The Rockchip
+DRM driver is backed by a virtual device that isn't IOMMU-capable,
+while the VOP is the actual display controller device backed by an
+IOMMU. This fixes "swiotlb buffer is full" warning messages
+originating from GEM prime code paths.
+
+Note that backporting is non-trivial, as this depends on
+commit 143ec8d3f9396 ("drm/prime: Support dedicated DMA device for dma-buf
+imports"), which landed in v6.16 and commit 421be3ee36a4 ("drm/rockchip:
+Refactor IOMMU initialisation"), which landed in v5.19.
+
+Reported-by: Daniel Stone <daniels@collabora.com>
+Fixes: 2048e3286f34 ("drm: rockchip: Add basic drm driver")
+Cc: stable@vger.kernel.org # v6.16+
+Reviewed-by: Sebastian Reichel <sebastian.reichel@collabora.com>
+Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+Tested-by: Cristian Ciocaltea <cristian.ciocaltea@collabora.com>
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Link: https://lore.kernel.org/r/20251022161948.199731-1-dmitry.osipenko@collabora.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+@@ -96,6 +96,9 @@ void rockchip_drm_dma_init_device(struct
+ private->iommu_dev = ERR_PTR(-ENODEV);
+ else if (!private->iommu_dev)
+ private->iommu_dev = dev;
++
++ if (!IS_ERR(private->iommu_dev))
++ drm_dev_set_dma_dev(drm_dev, private->iommu_dev);
+ }
+
+ static int rockchip_drm_init_iommu(struct drm_device *drm_dev)
--- /dev/null
+From d3fe9aa495854f8d88c69c41a4b31e69424656ad Mon Sep 17 00:00:00 2001
+From: Andy Yan <andy.yan@rock-chips.com>
+Date: Wed, 12 Nov 2025 16:50:23 +0800
+Subject: drm/rockchip: vop2: Use OVL_LAYER_SEL configuration instead of use win_mask calculate used layers
+
+From: Andy Yan <andy.yan@rock-chips.com>
+
+commit d3fe9aa495854f8d88c69c41a4b31e69424656ad upstream.
+
+When there are multiple Video Ports and only one of them is working
+(for example, VP1 is working while VP0 is not), the win_mask of VP0 is
+0. However, we have already set the port mux for VP0 according to
+vp0->nlayers, and at the same time the OVL_LAYER_SEL register still
+assigns windows to layers that map to the inactive VPs. Since
+vp0->win_mask is zero while VP0 is not working, it is more reliable to
+calculate the used layers based on the configuration of the
+OVL_LAYER_SEL register.
+
+Note: since the configuration of OVL_LAYER_SEL only takes effect when
+the next vsync arrives, use the value backed up in vop2->old_layer_sel
+instead of reading OVL_LAYER_SEL directly.
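+
+For reference, OVL_LAYER_SEL packs one 4-bit window id per layer slot,
+which is what the rewritten mixer-id helper below walks. A schematic
+accessor (name hypothetical, derived from the shifts in the patch):
+
+```
+	/* Window id programmed for layer slot i (each slot is 4 bits wide) */
+	static u8 layer_sel_win(u32 layer_sel, unsigned int i)
+	{
+		return (layer_sel >> (4 * i)) & 0xf;
+	}
+```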
+
+Fixes: 3e89a8c68354 ("drm/rockchip: vop2: Fix the update of LAYER/PORT select registers when there are multi display output on rk3588/rk3568")
+Cc: stable@vger.kernel.org
+Reported-by: Diederik de Haas <diederik@cknow-tech.com>
+Closes: https://bugs.kde.org/show_bug.cgi?id=511274
+Signed-off-by: Andy Yan <andy.yan@rock-chips.com>
+Tested-by: Dang Huynh <dang.huynh@mainlining.org>
+Tested-by: Diederik de Haas <diederik@cknow-tech.com>
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Link: https://lore.kernel.org/r/20251112085024.2480111-1-andyshrk@163.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/rockchip/rockchip_vop2_reg.c | 49 +++++++++++++++++++++++----
+ 1 file changed, 42 insertions(+), 7 deletions(-)
+
+--- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
++++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
+@@ -1369,6 +1369,25 @@ static const struct vop2_regs_dump rk358
+ },
+ };
+
++/*
++ * phys_id identifies a main window (Cluster Win/Smart Win, not including
++ * the sub-windows of a cluster or the multi-area) that can do overlay in
++ * the main overlay stage.
++ */
++static struct vop2_win *vop2_find_win_by_phys_id(struct vop2 *vop2, uint8_t phys_id)
++{
++ struct vop2_win *win;
++ int i;
++
++ for (i = 0; i < vop2->data->win_size; i++) {
++ win = &vop2->win[i];
++ if (win->data->phys_id == phys_id)
++ return win;
++ }
++
++ return NULL;
++}
++
+ static unsigned long rk3568_set_intf_mux(struct vop2_video_port *vp, int id, u32 polflags)
+ {
+ struct vop2 *vop2 = vp->vop2;
+@@ -1842,15 +1861,31 @@ static void vop2_parse_alpha(struct vop2
+ alpha->dst_alpha_ctrl.bits.factor_mode = ALPHA_SRC_INVERSE;
+ }
+
+-static int vop2_find_start_mixer_id_for_vp(struct vop2 *vop2, u8 port_id)
++static int vop2_find_start_mixer_id_for_vp(struct vop2_video_port *vp)
+ {
+- struct vop2_video_port *vp;
+- int used_layer = 0;
++ struct vop2 *vop2 = vp->vop2;
++ struct vop2_win *win;
++ u32 layer_sel = vop2->old_layer_sel;
++ u32 used_layer = 0;
++ unsigned long win_mask = vp->win_mask;
++ unsigned long phys_id;
++ bool match;
+ int i;
+
+- for (i = 0; i < port_id; i++) {
+- vp = &vop2->vps[i];
+- used_layer += hweight32(vp->win_mask);
++ for (i = 0; i < 31; i += 4) {
++ match = false;
++ for_each_set_bit(phys_id, &win_mask, ROCKCHIP_VOP2_ESMART3) {
++ win = vop2_find_win_by_phys_id(vop2, phys_id);
++ if (win->data->layer_sel_id[vp->id] == ((layer_sel >> i) & 0xf)) {
++ match = true;
++ break;
++ }
++ }
++
++ if (!match)
++ used_layer += 1;
++ else
++ break;
+ }
+
+ return used_layer;
+@@ -1935,7 +1970,7 @@ static void vop2_setup_alpha(struct vop2
+ u32 dst_global_alpha = DRM_BLEND_ALPHA_OPAQUE;
+
+ if (vop2->version <= VOP_VERSION_RK3588)
+- mixer_id = vop2_find_start_mixer_id_for_vp(vop2, vp->id);
++ mixer_id = vop2_find_start_mixer_id_for_vp(vp);
+ else
+ mixer_id = 0;
+
--- /dev/null
+From a585c7ef9cabda58088916baedc6573e9a5cd2a7 Mon Sep 17 00:00:00 2001
+From: "Kory Maincent (TI.com)" <kory.maincent@bootlin.com>
+Date: Tue, 25 Nov 2025 10:05:44 +0100
+Subject: drm/tilcdc: Fix removal actions in case of failed probe
+
+From: Kory Maincent (TI.com) <kory.maincent@bootlin.com>
+
+commit a585c7ef9cabda58088916baedc6573e9a5cd2a7 upstream.
+
+The drm_kms_helper_poll_fini() and drm_atomic_helper_shutdown() helpers
+should only be called when the device has been successfully registered.
+Currently, these functions are called unconditionally in tilcdc_fini(),
+which causes warnings during probe deferral scenarios.
+
+[ 7.972317] WARNING: CPU: 0 PID: 23 at drivers/gpu/drm/drm_atomic_state_helper.c:175 drm_atomic_helper_crtc_duplicate_state+0x60/0x68
+...
+[ 8.005820] drm_atomic_helper_crtc_duplicate_state from drm_atomic_get_crtc_state+0x68/0x108
+[ 8.005858] drm_atomic_get_crtc_state from drm_atomic_helper_disable_all+0x90/0x1c8
+[ 8.005885] drm_atomic_helper_disable_all from drm_atomic_helper_shutdown+0x90/0x144
+[ 8.005911] drm_atomic_helper_shutdown from tilcdc_fini+0x68/0xf8 [tilcdc]
+[ 8.005957] tilcdc_fini [tilcdc] from tilcdc_pdev_probe+0xb0/0x6d4 [tilcdc]
+
+Fix this by rewriting the failed probe cleanup path using the standard
+goto error handling pattern, which ensures that cleanup functions are
+only called on successfully initialized resources. Additionally, remove
+the now-unnecessary is_registered flag.
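+
+The rewritten cleanup path follows the usual kernel goto-unwind idiom,
+sketched here with hypothetical step_*/unstep_* helpers: each label
+undoes only the steps that succeeded before the failure, in reverse
+order.
+
+```
+	static int demo_probe(struct device *dev)
+	{
+		int ret;
+
+		ret = step_a(dev);	/* hypothetical setup step */
+		if (ret)
+			return ret;
+
+		ret = step_b(dev);	/* hypothetical setup step */
+		if (ret)
+			goto undo_a;
+
+		return 0;
+
+	undo_a:
+		unstep_a(dev);		/* undo only what succeeded */
+		return ret;
+	}
+```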
+
+Cc: stable@vger.kernel.org
+Fixes: 3c4babae3c4a ("drm: Call drm_atomic_helper_shutdown() at shutdown/remove time for misc drivers")
+Signed-off-by: Kory Maincent (TI.com) <kory.maincent@bootlin.com>
+Reviewed-by: Douglas Anderson <dianders@chromium.org>
+Reviewed-by: Luca Ceresoli <luca.ceresoli@bootlin.com>
+Signed-off-by: Douglas Anderson <dianders@chromium.org>
+Link: https://patch.msgid.link/20251125090546.137193-1-kory.maincent@bootlin.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 2 -
+ drivers/gpu/drm/tilcdc/tilcdc_drv.c | 53 +++++++++++++++++++++++------------
+ drivers/gpu/drm/tilcdc/tilcdc_drv.h | 2 -
+ 3 files changed, 37 insertions(+), 20 deletions(-)
+
+--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
++++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+@@ -586,7 +586,7 @@ out:
+ drm_modeset_unlock(&crtc->mutex);
+ }
+
+-static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
++void tilcdc_crtc_destroy(struct drm_crtc *crtc)
+ {
+ struct tilcdc_drm_private *priv = crtc->dev->dev_private;
+
+--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
++++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+@@ -172,8 +172,7 @@ static void tilcdc_fini(struct drm_devic
+ if (priv->crtc)
+ tilcdc_crtc_shutdown(priv->crtc);
+
+- if (priv->is_registered)
+- drm_dev_unregister(dev);
++ drm_dev_unregister(dev);
+
+ drm_kms_helper_poll_fini(dev);
+ drm_atomic_helper_shutdown(dev);
+@@ -220,21 +219,21 @@ static int tilcdc_init(const struct drm_
+ priv->wq = alloc_ordered_workqueue("tilcdc", 0);
+ if (!priv->wq) {
+ ret = -ENOMEM;
+- goto init_failed;
++ goto put_drm;
+ }
+
+ priv->mmio = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->mmio)) {
+ dev_err(dev, "failed to request / ioremap\n");
+ ret = PTR_ERR(priv->mmio);
+- goto init_failed;
++ goto free_wq;
+ }
+
+ priv->clk = clk_get(dev, "fck");
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to get functional clock\n");
+ ret = -ENODEV;
+- goto init_failed;
++ goto free_wq;
+ }
+
+ pm_runtime_enable(dev);
+@@ -313,7 +312,7 @@ static int tilcdc_init(const struct drm_
+ ret = tilcdc_crtc_create(ddev);
+ if (ret < 0) {
+ dev_err(dev, "failed to create crtc\n");
+- goto init_failed;
++ goto disable_pm;
+ }
+ modeset_init(ddev);
+
+@@ -324,46 +323,46 @@ static int tilcdc_init(const struct drm_
+ if (ret) {
+ dev_err(dev, "failed to register cpufreq notifier\n");
+ priv->freq_transition.notifier_call = NULL;
+- goto init_failed;
++ goto destroy_crtc;
+ }
+ #endif
+
+ if (priv->is_componentized) {
+ ret = component_bind_all(dev, ddev);
+ if (ret < 0)
+- goto init_failed;
++ goto unregister_cpufreq_notif;
+
+ ret = tilcdc_add_component_encoder(ddev);
+ if (ret < 0)
+- goto init_failed;
++ goto unbind_component;
+ } else {
+ ret = tilcdc_attach_external_device(ddev);
+ if (ret)
+- goto init_failed;
++ goto unregister_cpufreq_notif;
+ }
+
+ if (!priv->external_connector &&
+ ((priv->num_encoders == 0) || (priv->num_connectors == 0))) {
+ dev_err(dev, "no encoders/connectors found\n");
+ ret = -EPROBE_DEFER;
+- goto init_failed;
++ goto unbind_component;
+ }
+
+ ret = drm_vblank_init(ddev, 1);
+ if (ret < 0) {
+ dev_err(dev, "failed to initialize vblank\n");
+- goto init_failed;
++ goto unbind_component;
+ }
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+- goto init_failed;
++ goto unbind_component;
+ priv->irq = ret;
+
+ ret = tilcdc_irq_install(ddev, priv->irq);
+ if (ret < 0) {
+ dev_err(dev, "failed to install IRQ handler\n");
+- goto init_failed;
++ goto unbind_component;
+ }
+
+ drm_mode_config_reset(ddev);
+@@ -372,16 +371,34 @@ static int tilcdc_init(const struct drm_
+
+ ret = drm_dev_register(ddev, 0);
+ if (ret)
+- goto init_failed;
+- priv->is_registered = true;
++ goto stop_poll;
+
+ drm_client_setup_with_color_mode(ddev, bpp);
+
+ return 0;
+
+-init_failed:
+- tilcdc_fini(ddev);
++stop_poll:
++ drm_kms_helper_poll_fini(ddev);
++ tilcdc_irq_uninstall(ddev);
++unbind_component:
++ if (priv->is_componentized)
++ component_unbind_all(dev, ddev);
++unregister_cpufreq_notif:
++#ifdef CONFIG_CPU_FREQ
++ cpufreq_unregister_notifier(&priv->freq_transition,
++ CPUFREQ_TRANSITION_NOTIFIER);
++destroy_crtc:
++#endif
++ tilcdc_crtc_destroy(priv->crtc);
++disable_pm:
++ pm_runtime_disable(dev);
++ clk_put(priv->clk);
++free_wq:
++ destroy_workqueue(priv->wq);
++put_drm:
+ platform_set_drvdata(pdev, NULL);
++ ddev->dev_private = NULL;
++ drm_dev_put(ddev);
+
+ return ret;
+ }
+--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.h
++++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
+@@ -82,7 +82,6 @@ struct tilcdc_drm_private {
+ struct drm_encoder *external_encoder;
+ struct drm_connector *external_connector;
+
+- bool is_registered;
+ bool is_componentized;
+ bool irq_enabled;
+ };
+@@ -164,6 +163,7 @@ void tilcdc_crtc_set_panel_info(struct d
+ void tilcdc_crtc_set_simulate_vesa_sync(struct drm_crtc *crtc,
+ bool simulate_vesa_sync);
+ void tilcdc_crtc_shutdown(struct drm_crtc *crtc);
++void tilcdc_crtc_destroy(struct drm_crtc *crtc);
+ int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event);
--- /dev/null
+From 491adc6a0f9903c32b05f284df1148de39e8e644 Mon Sep 17 00:00:00 2001
+From: Simon Richter <Simon.Richter@hogyros.de>
+Date: Tue, 14 Oct 2025 01:11:33 +0900
+Subject: drm/ttm: Avoid NULL pointer deref for evicted BOs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Simon Richter <Simon.Richter@hogyros.de>
+
+commit 491adc6a0f9903c32b05f284df1148de39e8e644 upstream.
+
+It is possible for a BO to exist that is not currently associated with a
+resource, e.g. because it has been evicted.
+
+When devcoredump tries to read the contents of all BOs for dumping, we need
+to expect this as well -- in this case, ENODATA is recorded instead of the
+buffer contents.
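+
+Callers can then treat -ENODATA as "no backing store right now" rather
+than a hard failure. A caller-side sketch (the zero-fill policy is
+illustrative, not what any particular dumper does):
+
+```
+	ret = ttm_bo_access(bo, offset, buf, len, false);
+	if (ret == -ENODATA)
+		memset(buf, 0, len);	/* evicted BO: record a hole */
+	else if (ret < 0)
+		return ret;
+```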
+
+Fixes: 7d08df5d0bd3 ("drm/ttm: Add ttm_bo_access")
+Fixes: 09ac4fcb3f25 ("drm/ttm: Implement vm_operations_struct.access v2")
+Cc: stable <stable@kernel.org>
+Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/6271
+Signed-off-by: Simon Richter <Simon.Richter@hogyros.de>
+Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+Reviewed-by: Shuicheng Lin <shuicheng.lin@intel.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://patch.msgid.link/20251013161241.709916-1-Simon.Richter@hogyros.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/ttm/ttm_bo_vm.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
++++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
+@@ -434,6 +434,11 @@ int ttm_bo_access(struct ttm_buffer_obje
+ if (ret)
+ return ret;
+
++ if (!bo->resource) {
++ ret = -ENODATA;
++ goto unlock;
++ }
++
+ switch (bo->resource->mem_type) {
+ case TTM_PL_SYSTEM:
+ fallthrough;
+@@ -448,6 +453,7 @@ int ttm_bo_access(struct ttm_buffer_obje
+ ret = -EIO;
+ }
+
++unlock:
+ ttm_bo_unreserve(bo);
+
+ return ret;
--- /dev/null
+From 6f0f404bd289d79a260b634c5b3f4d330b13472c Mon Sep 17 00:00:00 2001
+From: Matthew Brost <matthew.brost@intel.com>
+Date: Fri, 12 Dec 2025 10:28:41 -0800
+Subject: drm/xe: Adjust long-running workload timeslices to reasonable values
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Matthew Brost <matthew.brost@intel.com>
+
+commit 6f0f404bd289d79a260b634c5b3f4d330b13472c upstream.
+
+A 10ms timeslice for long-running workloads is far too long and causes
+significant jitter in benchmarks when the system is shared. Adjust the
+value to 5ms for preempt-fencing VMs, where the resume step is quite
+costly because memory is moved around, and set it to zero for pagefault
+VMs, since switching back to pagefault mode after dma-fence mode is
+relatively fast.
+
+Also change min_run_period_ms to 'unsigned int' rather than 's64', as
+only positive values make sense.
+
+Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
+Cc: stable@vger.kernel.org
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Link: https://patch.msgid.link/20251212182847.1683222-2-matthew.brost@intel.com
+(cherry picked from commit 33a5abd9a68394aa67f9618b20eee65ee8702ff4)
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_vm.c | 5 ++++-
+ drivers/gpu/drm/xe/xe_vm_types.h | 2 +-
+ 2 files changed, 5 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/xe/xe_vm.c
++++ b/drivers/gpu/drm/xe/xe_vm.c
+@@ -1481,7 +1481,10 @@ struct xe_vm *xe_vm_create(struct xe_dev
+ INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
+
+ INIT_LIST_HEAD(&vm->preempt.exec_queues);
+- vm->preempt.min_run_period_ms = 10; /* FIXME: Wire up to uAPI */
++ if (flags & XE_VM_FLAG_FAULT_MODE)
++ vm->preempt.min_run_period_ms = 0;
++ else
++ vm->preempt.min_run_period_ms = 5;
+
+ for_each_tile(tile, xe, id)
+ xe_range_fence_tree_init(&vm->rftree[id]);
+--- a/drivers/gpu/drm/xe/xe_vm_types.h
++++ b/drivers/gpu/drm/xe/xe_vm_types.h
+@@ -268,7 +268,7 @@ struct xe_vm {
+ * @min_run_period_ms: The minimum run period before preempting
+ * an engine again
+ */
+- s64 min_run_period_ms;
++ unsigned int min_run_period_ms;
+ /** @exec_queues: list of exec queues attached to this VM */
+ struct list_head exec_queues;
+ /** @num_exec_queues: number exec queues attached to this VM */
--- /dev/null
+From 449bcd5d45eb4ce26740f11f8601082fe734bed2 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Thomas=20Hellstr=C3=B6m?= <thomas.hellstrom@linux.intel.com>
+Date: Tue, 9 Dec 2025 21:49:20 +0100
+Subject: drm/xe/bo: Don't include the CCS metadata in the dma-buf sg-table
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+
+commit 449bcd5d45eb4ce26740f11f8601082fe734bed2 upstream.
+
+Some Xe bos are allocated with extra backing-store for the CCS
+metadata. It's never been the intention to share the CCS metadata
+when exporting such bos as dma-buf. Don't include it in the
+dma-buf sg-table.
+
+Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
+Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Cc: <stable@vger.kernel.org> # v6.8+
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+Reviewed-by: Karol Wachowski <karol.wachowski@linux.intel.com>
+Link: https://patch.msgid.link/20251209204920.224374-1-thomas.hellstrom@linux.intel.com
+(cherry picked from commit a4ebfb9d95d78a12512b435a698ee6886d712571)
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_dma_buf.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/xe/xe_dma_buf.c
++++ b/drivers/gpu/drm/xe/xe_dma_buf.c
+@@ -113,7 +113,7 @@ static struct sg_table *xe_dma_buf_map(s
+ case XE_PL_TT:
+ sgt = drm_prime_pages_to_sg(obj->dev,
+ bo->ttm.ttm->pages,
+- bo->ttm.ttm->num_pages);
++ obj->size >> PAGE_SHIFT);
+ if (IS_ERR(sgt))
+ return sgt;
+
--- /dev/null
+From fe3ccd24138fd391ae8e32289d492c85f67770fc Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Thomas=20Hellstr=C3=B6m?= <thomas.hellstrom@linux.intel.com>
+Date: Wed, 17 Dec 2025 10:34:41 +0100
+Subject: drm/xe: Drop preempt-fences when destroying imported dma-bufs.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+
+commit fe3ccd24138fd391ae8e32289d492c85f67770fc upstream.
+
+When imported dma-bufs are destroyed, TTM is not fully
+individualizing the dma-resv, but it *is* copying the fences that
+need to be waited for before declaring idle. So in the case where
+bo->resv != bo->_resv we can still drop the preempt-fences, but we
+make sure we do that on bo->_resv, which contains the fence-pointer
+copy.
+
+In the case where the copying fails, bo->_resv will typically not
+contain any fence pointers at all, so there will be nothing to
+drop. In that case, TTM would have ensured all fences that would
+have been copied are signaled, including any remaining preempt
+fences.
+
+Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
+Fixes: fa0af721bd1f ("drm/ttm: test private resv obj on release/destroy")
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: <stable@vger.kernel.org> # v6.16+
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Tested-by: Matthew Brost <matthew.brost@intel.com>
+Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://patch.msgid.link/20251217093441.5073-1-thomas.hellstrom@linux.intel.com
+(cherry picked from commit 425fe550fb513b567bd6d01f397d274092a9c274)
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_bo.c | 15 ++++-----------
+ 1 file changed, 4 insertions(+), 11 deletions(-)
+
+--- a/drivers/gpu/drm/xe/xe_bo.c
++++ b/drivers/gpu/drm/xe/xe_bo.c
+@@ -1480,7 +1480,7 @@ static bool xe_ttm_bo_lock_in_destructor
+ * always succeed here, as long as we hold the lru lock.
+ */
+ spin_lock(&ttm_bo->bdev->lru_lock);
+- locked = dma_resv_trylock(ttm_bo->base.resv);
++ locked = dma_resv_trylock(&ttm_bo->base._resv);
+ spin_unlock(&ttm_bo->bdev->lru_lock);
+ xe_assert(xe, locked);
+
+@@ -1500,13 +1500,6 @@ static void xe_ttm_bo_release_notify(str
+ bo = ttm_to_xe_bo(ttm_bo);
+ xe_assert(xe_bo_device(bo), !(bo->created && kref_read(&ttm_bo->base.refcount)));
+
+- /*
+- * Corner case where TTM fails to allocate memory and this BOs resv
+- * still points the VMs resv
+- */
+- if (ttm_bo->base.resv != &ttm_bo->base._resv)
+- return;
+-
+ if (!xe_ttm_bo_lock_in_destructor(ttm_bo))
+ return;
+
+@@ -1516,14 +1509,14 @@ static void xe_ttm_bo_release_notify(str
+ * TODO: Don't do this for external bos once we scrub them after
+ * unbind.
+ */
+- dma_resv_for_each_fence(&cursor, ttm_bo->base.resv,
++ dma_resv_for_each_fence(&cursor, &ttm_bo->base._resv,
+ DMA_RESV_USAGE_BOOKKEEP, fence) {
+ if (xe_fence_is_xe_preempt(fence) &&
+ !dma_fence_is_signaled(fence)) {
+ if (!replacement)
+ replacement = dma_fence_get_stub();
+
+- dma_resv_replace_fences(ttm_bo->base.resv,
++ dma_resv_replace_fences(&ttm_bo->base._resv,
+ fence->context,
+ replacement,
+ DMA_RESV_USAGE_BOOKKEEP);
+@@ -1531,7 +1524,7 @@ static void xe_ttm_bo_release_notify(str
+ }
+ dma_fence_put(replacement);
+
+- dma_resv_unlock(ttm_bo->base.resv);
++ dma_resv_unlock(&ttm_bo->base._resv);
+ }
+
+ static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo)
--- /dev/null
+From 3767ca4166ad42fa9e34269efeaf9f15995cd92d Mon Sep 17 00:00:00 2001
+From: Ashutosh Dixit <ashutosh.dixit@intel.com>
+Date: Thu, 11 Dec 2025 22:18:50 -0800
+Subject: drm/xe/eustall: Disallow 0 EU stall property values
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ashutosh Dixit <ashutosh.dixit@intel.com>
+
+commit 3767ca4166ad42fa9e34269efeaf9f15995cd92d upstream.
+
+An EU stall property value of 0 is invalid and will cause a NULL
+pointer dereference (NPD).
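+
+The NPD comes from the property-function table: assuming the valid
+property ids start at 1 (as the designated initializers suggest), slot
+0 is NULL, and an unchecked ext.property == 0 would be called through
+a NULL pointer after the nospec clamp. Schematically (names
+hypothetical):
+
+```
+	static const xe_eu_stall_property_fn funcs[] = {
+		[1] = set_prop_buf_sz,		/* hypothetical handlers */
+		[2] = set_prop_sample_rate,
+	};
+	/* funcs[0] == NULL, yet 0 < ARRAY_SIZE(funcs): without the
+	 * !ext.property check, funcs[0](...) dereferences NULL. */
+```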
+
+Reported-by: Peter Senna Tschudin <peter.senna@linux.intel.com>
+Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/6453
+Fixes: 1537ec85ebd7 ("drm/xe/uapi: Introduce API for EU stall sampling")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
+Reviewed-by: Harish Chegondi <harish.chegondi@intel.com>
+Link: https://patch.msgid.link/20251212061850.1565459-4-ashutosh.dixit@intel.com
+(cherry picked from commit 5bf763e908bf795da4ad538d21c1ec41f8021f76)
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_eu_stall.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/xe/xe_eu_stall.c
++++ b/drivers/gpu/drm/xe/xe_eu_stall.c
+@@ -290,7 +290,7 @@ static int xe_eu_stall_user_ext_set_prop
+ return -EFAULT;
+
+ if (XE_IOCTL_DBG(xe, ext.property >= ARRAY_SIZE(xe_set_eu_stall_property_funcs)) ||
+- XE_IOCTL_DBG(xe, ext.pad))
++ XE_IOCTL_DBG(xe, !ext.property) || XE_IOCTL_DBG(xe, ext.pad))
+ return -EINVAL;
+
+ idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_set_eu_stall_property_funcs));
--- /dev/null
+From 3595114bc31d1eb5e1996164c901485c1ffac6f7 Mon Sep 17 00:00:00 2001
+From: Ashutosh Dixit <ashutosh.dixit@intel.com>
+Date: Thu, 11 Dec 2025 22:18:49 -0800
+Subject: drm/xe/oa: Disallow 0 OA property values
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ashutosh Dixit <ashutosh.dixit@intel.com>
+
+commit 3595114bc31d1eb5e1996164c901485c1ffac6f7 upstream.
+
+An OA property value of 0 is invalid and will cause a NULL pointer
+dereference (NPD).
+
+Reported-by: Peter Senna Tschudin <peter.senna@linux.intel.com>
+Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/6452
+Fixes: cc4e6994d5a2 ("drm/xe/oa: Move functions up so they can be reused for config ioctl")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
+Reviewed-by: Harish Chegondi <harish.chegondi@intel.com>
+Link: https://patch.msgid.link/20251212061850.1565459-3-ashutosh.dixit@intel.com
+(cherry picked from commit 7a100e6ddcc47c1f6ba7a19402de86ce24790621)
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_oa.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/xe/xe_oa.c
++++ b/drivers/gpu/drm/xe/xe_oa.c
+@@ -1346,7 +1346,7 @@ static int xe_oa_user_ext_set_property(s
+ ARRAY_SIZE(xe_oa_set_property_funcs_config));
+
+ if (XE_IOCTL_DBG(oa->xe, ext.property >= ARRAY_SIZE(xe_oa_set_property_funcs_open)) ||
+- XE_IOCTL_DBG(oa->xe, ext.pad))
++ XE_IOCTL_DBG(oa->xe, !ext.property) || XE_IOCTL_DBG(oa->xe, ext.pad))
+ return -EINVAL;
+
+ idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_oa_set_property_funcs_open));
--- /dev/null
+From dcb171931954c51a1a7250d558f02b8f36570783 Mon Sep 17 00:00:00 2001
+From: Sanjay Yadav <sanjay.kumar.yadav@intel.com>
+Date: Tue, 18 Nov 2025 17:19:00 +0530
+Subject: drm/xe/oa: Fix potential UAF in xe_oa_add_config_ioctl()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Sanjay Yadav <sanjay.kumar.yadav@intel.com>
+
+commit dcb171931954c51a1a7250d558f02b8f36570783 upstream.
+
+In xe_oa_add_config_ioctl(), we accessed oa_config->id after dropping
+metrics_lock. Since this lock protects the lifetime of oa_config, an
+attacker could guess the id and call xe_oa_remove_config_ioctl() with
+perfect timing, freeing oa_config before we dereference it, leading to
+a potential use-after-free.
+
+Fix this by caching the id in a local variable while holding the lock.
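+
+The fix is the general "snapshot before unlock" rule: any state whose
+lifetime a lock protects must be read while the lock is still held.
+Schematically:
+
+```
+	mutex_lock(&oa->metrics_lock);
+	/* ... config created and published ... */
+	id = oa_config->id;	/* read while the lock still pins oa_config */
+	mutex_unlock(&oa->metrics_lock);
+
+	return id;	/* oa_config may already be freed by a racing remove */
+```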
+
+v2: (Matt A)
+- Dropped mutex_unlock(&oa->metrics_lock) ordering change from
+ xe_oa_remove_config_ioctl()
+
+Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/6614
+Fixes: cdf02fe1a94a7 ("drm/xe/oa/uapi: Add/remove OA config perf ops")
+Cc: <stable@vger.kernel.org> # v6.11+
+Suggested-by: Matthew Auld <matthew.auld@intel.com>
+Signed-off-by: Sanjay Yadav <sanjay.kumar.yadav@intel.com>
+Reviewed-by: Matthew Auld <matthew.auld@intel.com>
+Signed-off-by: Matthew Auld <matthew.auld@intel.com>
+Link: https://patch.msgid.link/20251118114859.3379952-2-sanjay.kumar.yadav@intel.com
+(cherry picked from commit 28aeaed130e8e587fd1b73b6d66ca41ccc5a1a31)
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_oa.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/xe/xe_oa.c
++++ b/drivers/gpu/drm/xe/xe_oa.c
+@@ -2407,11 +2407,13 @@ int xe_oa_add_config_ioctl(struct drm_de
+ goto sysfs_err;
+ }
+
+- mutex_unlock(&oa->metrics_lock);
++ id = oa_config->id;
++
++ drm_dbg(&oa->xe->drm, "Added config %s id=%i\n", oa_config->uuid, id);
+
+- drm_dbg(&oa->xe->drm, "Added config %s id=%i\n", oa_config->uuid, oa_config->id);
++ mutex_unlock(&oa->metrics_lock);
+
+- return oa_config->id;
++ return id;
+
+ sysfs_err:
+ mutex_unlock(&oa->metrics_lock);
--- /dev/null
+From d2d7f5636f0d752a1e0e7eadbbc1839c29177bba Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Thomas=20Hellstr=C3=B6m?= <thomas.hellstrom@linux.intel.com>
+Date: Fri, 19 Dec 2025 12:32:57 +0100
+Subject: drm/xe/svm: Fix a debug printout
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+
+commit d2d7f5636f0d752a1e0e7eadbbc1839c29177bba upstream.
+
+Avoid spamming the log with drm_info(). Use drm_dbg() instead.
+
+Fixes: cc795e041034 ("drm/xe/svm: Make xe_svm_range_needs_migrate_to_vram() public")
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
+Cc: <stable@vger.kernel.org> # v6.17+
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Reviewed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
+Link: https://patch.msgid.link/20251219113320.183860-2-thomas.hellstrom@linux.intel.com
+(cherry picked from commit 72aee5f70ba47b939345a0d3414b51b0639c5b88)
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_svm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/xe/xe_svm.c
++++ b/drivers/gpu/drm/xe/xe_svm.c
+@@ -942,7 +942,7 @@ bool xe_svm_range_needs_migrate_to_vram(
+ xe_assert(vm->xe, IS_DGFX(vm->xe));
+
+ if (xe_svm_range_in_vram(range)) {
+- drm_info(&vm->xe->drm, "Range is already in VRAM\n");
++ drm_dbg(&vm->xe->drm, "Range is already in VRAM\n");
+ return false;
+ }
+
--- /dev/null
+From 80f9c601d9c4d26f00356c0a9c461650e7089273 Mon Sep 17 00:00:00 2001
+From: Matthew Brost <matthew.brost@intel.com>
+Date: Fri, 12 Dec 2025 10:28:42 -0800
+Subject: drm/xe: Use usleep_range for accurate long-running workload timeslicing
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Matthew Brost <matthew.brost@intel.com>
+
+commit 80f9c601d9c4d26f00356c0a9c461650e7089273 upstream.
+
+msleep is not very accurate in terms of how long it actually sleeps,
+whereas usleep_range is precise. Replace the timeslice sleep for
+long-running workloads with the more accurate usleep_range to avoid
+jitter if the sleep period is less than 20ms.
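+
+For context, msleep() rounds the delay up to jiffies, so small values
+can oversleep badly (the kernel's timers-howto notes ~20 ms for 1-20 ms
+requests), while usleep_range() uses hrtimers with a bounded wakeup
+window:
+
+```
+	usleep_range(5000, 5500);	/* wake between 5.0 and 5.5 ms */
+	msleep(5);			/* may take up to ~20 ms depending on HZ */
+```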
+
+Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
+Cc: stable@vger.kernel.org
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Link: https://patch.msgid.link/20251212182847.1683222-3-matthew.brost@intel.com
+(cherry picked from commit ca415c4d4c17ad676a2c8981e1fcc432221dce79)
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_guc_submit.c | 20 +++++++++++++++++++-
+ 1 file changed, 19 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/xe/xe_guc_submit.c
++++ b/drivers/gpu/drm/xe/xe_guc_submit.c
+@@ -670,6 +670,24 @@ static u32 wq_space_until_wrap(struct xe
+ return (WQ_SIZE - q->guc->wqi_tail);
+ }
+
++static inline void relaxed_ms_sleep(unsigned int delay_ms)
++{
++ unsigned long min_us, max_us;
++
++ if (!delay_ms)
++ return;
++
++ if (delay_ms > 20) {
++ msleep(delay_ms);
++ return;
++ }
++
++ min_us = mul_u32_u32(delay_ms, 1000);
++ max_us = min_us + 500;
++
++ usleep_range(min_us, max_us);
++}
++
+ static int wq_wait_for_space(struct xe_exec_queue *q, u32 wqi_size)
+ {
+ struct xe_guc *guc = exec_queue_to_guc(q);
+@@ -1559,7 +1577,7 @@ static void __guc_exec_queue_process_msg
+ since_resume_ms;
+
+ if (wait_ms > 0 && q->guc->resume_time)
+- msleep(wait_ms);
++ relaxed_ms_sleep(wait_ms);
+
+ set_exec_queue_suspended(q);
+ disable_scheduling(q, false);
--- /dev/null
+From 3925683515e93844be204381d2d5a1df5de34f31 Mon Sep 17 00:00:00 2001
+From: "Mario Limonciello (AMD)" <superm1@kernel.org>
+Date: Sat, 29 Nov 2025 19:46:31 -0600
+Subject: Revert "drm/amd: Skip power ungate during suspend for VPE"
+
+From: Mario Limonciello (AMD) <superm1@kernel.org>
+
+commit 3925683515e93844be204381d2d5a1df5de34f31 upstream.
+
+Skipping power ungate exposed some scenarios that now fail, like the
+one below:
+
+```
+amdgpu: Register(0) [regVPEC_QUEUE_RESET_REQ] failed to reach value 0x00000000 != 0x00000001n
+amdgpu 0000:c1:00.0: amdgpu: VPE queue reset failed
+...
+amdgpu: [drm] *ERROR* wait_for_completion_timeout timeout!
+```
+
+The underlying s2idle issue that prompted this commit is going to
+be fixed in BIOS.
+This reverts commit 2a6c826cfeedd7714611ac115371a959ead55bda.
+
+Fixes: 2a6c826cfeed ("drm/amd: Skip power ungate during suspend for VPE")
+Cc: stable@vger.kernel.org
+Signed-off-by: Mario Limonciello (AMD) <superm1@kernel.org>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Reported-by: Konstantin <answer2019@yandex.ru>
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=220812
+Reported-by: Matthew Schwartz <matthew.schwartz@linux.dev>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3416,11 +3416,10 @@ int amdgpu_device_set_pg_state(struct am
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
+ continue;
+- /* skip CG for VCE/UVD/VPE, it's handled specially */
++ /* skip CG for VCE/UVD, it's handled specially */
+ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
+- adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VPE &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
+ adev->ip_blocks[i].version->funcs->set_powergating_state) {
+ /* enable powergating to save power */
platform-x86-alienware-wmi-wmax-add-support-for-new-area-51-laptops.patch
platform-x86-alienware-wmi-wmax-add-awcc-support-for-alienware-x16.patch
platform-x86-alienware-wmi-wmax-add-support-for-alienware-16x-aurora.patch
+revert-drm-amd-skip-power-ungate-during-suspend-for-vpe.patch
+drm-amdgpu-gmc12-add-amdgpu_vm_handle_fault-handling.patch
+drm-amdgpu-forward-vmid-reservation-errors.patch
+drm-amdgpu-add-missing-lock-to-amdgpu_ttm_access_memory_sdma.patch
+drm-amdgpu-sdma6-update-sdma-6.0.3-fw-version-to-include-umq-protected-fence-fix.patch
+drm-amdgpu-gmc11-add-amdgpu_vm_handle_fault-handling.patch
+drm-msm-a6xx-fix-out-of-bound-io-access-in-a6xx_get_gmu_registers.patch
+drm-buddy-optimize-free-block-management-with-rb-tree.patch
+drm-buddy-separate-clear-and-dirty-free-block-trees.patch
+drm-gma500-remove-unused-helper-psb_fbdev_fb_setcolreg.patch
+drm-xe-oa-fix-potential-uaf-in-xe_oa_add_config_ioctl.patch
+drm-rockchip-set-vop-for-the-drm-dma-device.patch
+drm-mediatek-fix-device-node-reference-leak-in-mtk_dp_dt_parse.patch
+drm-mediatek-fix-probe-resource-leaks.patch
+drm-mediatek-fix-probe-memory-leak.patch
+drm-mediatek-fix-probe-device-leaks.patch
+drm-mediatek-mtk_hdmi-fix-probe-device-leaks.patch
+drm-mediatek-ovl_adaptor-fix-probe-device-leaks.patch
+drm-amd-fix-unbind-rebind-for-vcn-4.0.5.patch
+drm-rockchip-vop2-use-ovl_layer_sel-configuration-instead-of-use-win_mask-calculate-used-layers.patch
+drm-bridge-ti-sn65dsi83-ignore-pll_unlock-errors.patch
+drm-nouveau-gsp-allocate-fwsec-sb-at-boot.patch
+drm-amdkfd-export-the-cwsr_size-and-ctl_stack_size-to-userspace.patch
+drm-amdkfd-bump-minimum-vgpr-size-for-gfx1151.patch
+drm-amdkfd-trap-handler-support-for-expert-scheduling-mode.patch
+drm-i915-fix-format-string-truncation-warning.patch
+drm-tilcdc-fix-removal-actions-in-case-of-failed-probe.patch
+drm-ttm-avoid-null-pointer-deref-for-evicted-bos.patch
+drm-mgag200-fix-big-endian-support.patch
+drm-fix-object-leak-in-drm_ioctl_gem_change_handle.patch
+drm-xe-bo-don-t-include-the-ccs-metadata-in-the-dma-buf-sg-table.patch
+drm-xe-oa-disallow-0-oa-property-values.patch
+drm-xe-eustall-disallow-0-eu-stall-property-values.patch
+drm-xe-adjust-long-running-workload-timeslices-to-reasonable-values.patch
+drm-xe-use-usleep_range-for-accurate-long-running-workload-timeslicing.patch
+drm-xe-drop-preempt-fences-when-destroying-imported-dma-bufs.patch
+drm-msm-dpu-add-missing-null-pointer-check-for-pingpong-interface.patch
+drm-msm-add-perfctr_cntl-to-ifpc_reglist.patch
+drm-i915-gem-zero-initialize-the-eb.vma-array-in-i915_gem_do_execbuffer.patch
+drm-xe-svm-fix-a-debug-printout.patch
+drm-pagemap-drm-xe-ensure-that-the-devmem-allocation-is-idle-before-use.patch
+drm-nouveau-dispnv50-don-t-call-drm_atomic_get_crtc_state-in-prepare_fb.patch
+drm-imagination-disallow-exporting-of-pm-fw-protected-objects.patch