git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
BackMerge tag 'v6.19-rc7' into drm-next
authorDave Airlie <airlied@redhat.com>
Wed, 28 Jan 2026 02:44:28 +0000 (12:44 +1000)
committerDave Airlie <airlied@redhat.com>
Wed, 28 Jan 2026 02:44:28 +0000 (12:44 +1000)
Linux 6.19-rc7

This is needed for msm and rust trees.

Signed-off-by: Dave Airlie <airlied@redhat.com>
31 files changed:
1  2 
.mailmap
MAINTAINERS
drivers/gpu/drm/Kconfig
drivers/gpu/drm/Makefile
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_gem_shmem_helper.c
drivers/gpu/drm/drm_gpuvm.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/imagination/pvr_fw_trace.c
drivers/gpu/drm/mediatek/mtk_dsi.c
drivers/gpu/drm/mediatek/mtk_hdmi_common.c
drivers/gpu/drm/mediatek/mtk_hdmi_common.h
drivers/gpu/drm/mediatek/mtk_hdmi_v2.c
drivers/gpu/drm/msm/msm_gem_vma.c
drivers/gpu/drm/panel/panel-simple.c
drivers/gpu/drm/panthor/panthor_mmu.c
drivers/gpu/drm/pl111/pl111_drv.c
drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
drivers/gpu/drm/xe/xe_exec_queue.c
drivers/gpu/drm/xe/xe_exec_queue.h
drivers/gpu/drm/xe/xe_exec_queue_types.h
drivers/gpu/drm/xe/xe_migrate.c
drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
drivers/gpu/drm/xe/xe_vm.c
include/drm/display/drm_dp_helper.h
include/drm/drm_bridge.h
include/drm/drm_pagemap.h

diff --cc .mailmap
Simple merge
diff --cc MAINTAINERS
Simple merge
Simple merge
index 0deee72ef9358afa7f95b71dbf8c479f72df7f5b,d261917174280ba99a2a5904e2a218efcc27e3c2..0c21029c446fd36bc0c8213e8c4a2cedbb14e174
@@@ -108,9 -108,10 +108,11 @@@ obj-$(CONFIG_DRM_EXEC) += drm_exec.
  obj-$(CONFIG_DRM_GPUVM) += drm_gpuvm.o
  
  drm_gpusvm_helper-y := \
-       drm_gpusvm.o\
+       drm_gpusvm.o
+ drm_gpusvm_helper-$(CONFIG_ZONE_DEVICE) += \
 -      drm_pagemap.o
 +      drm_pagemap.o\
 +      drm_pagemap_util.o
  obj-$(CONFIG_DRM_GPUSVM) += drm_gpusvm_helper.o
  
  obj-$(CONFIG_DRM_BUDDY) += drm_buddy.o
index 036316e2b60dc3b5df66a90a8feb1854af968e61,60166919c5b548239c5deb07a601e31519153d84..ab7fed6214e062fefe7d638fc44c0767485b60ce
@@@ -164,9 -163,9 +164,10 @@@ struct dw_hdmi_qp 
  
        unsigned long ref_clk_rate;
        struct regmap *regm;
+       int main_irq;
  
        unsigned long tmds_char_rate;
 +      bool no_hpd;
  };
  
  static void dw_hdmi_qp_write(struct dw_hdmi_qp *hdmi, unsigned int val,
Simple merge
Simple merge
index de73c8ab8cca83e440b8fb49d72afa3d045c88da,f13eb5f36e8a97b0773e1dd8d8418cd1c4d0853f..3871a6d92f776442bac809577592a0e3a53b5e2a
@@@ -96,13 -94,11 +96,13 @@@ err_release
  }
  
  /**
-  * drm_gem_shmem_init - Initialize an allocated object of the given size
+  * drm_gem_shmem_init - Initialize an allocated object.
   * @dev: DRM device
 - * @shmem: The allocated shmem GEM object.
 + * @shmem: shmem GEM object to initialize
-  * @size: Size of the object to initialize
+  * @size: Buffer size in bytes
   *
 + * This function initializes an allocated shmem GEM object.
 + *
   * Returns:
   * 0 on success, or a negative error code on failure.
   */
@@@ -903,67 -894,6 +903,67 @@@ fail_detach
  }
  EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_no_map);
  
 +/*
 + * Kunit helpers
 + */
 +
 +#if IS_ENABLED(CONFIG_KUNIT)
 +int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, struct iosys_map *map)
 +{
 +      struct drm_gem_object *obj = &shmem->base;
 +      int ret;
 +
 +      ret = dma_resv_lock_interruptible(obj->resv, NULL);
 +      if (ret)
 +              return ret;
 +      ret = drm_gem_shmem_vmap_locked(shmem, map);
 +      dma_resv_unlock(obj->resv);
 +
 +      return ret;
 +}
 +EXPORT_SYMBOL_IF_KUNIT(drm_gem_shmem_vmap);
 +
 +void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, struct iosys_map *map)
 +{
 +      struct drm_gem_object *obj = &shmem->base;
 +
 +      dma_resv_lock_interruptible(obj->resv, NULL);
 +      drm_gem_shmem_vunmap_locked(shmem, map);
 +      dma_resv_unlock(obj->resv);
 +}
 +EXPORT_SYMBOL_IF_KUNIT(drm_gem_shmem_vunmap);
 +
 +int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
 +{
 +      struct drm_gem_object *obj = &shmem->base;
 +      int ret;
 +
 +      ret = dma_resv_lock_interruptible(obj->resv, NULL);
 +      if (ret)
 +              return ret;
 +      ret = drm_gem_shmem_madvise_locked(shmem, madv);
 +      dma_resv_unlock(obj->resv);
 +
 +      return ret;
 +}
 +EXPORT_SYMBOL_IF_KUNIT(drm_gem_shmem_madvise);
 +
 +int drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
 +{
 +      struct drm_gem_object *obj = &shmem->base;
 +      int ret;
 +
 +      ret = dma_resv_lock_interruptible(obj->resv, NULL);
 +      if (ret)
 +              return ret;
 +      drm_gem_shmem_purge_locked(shmem);
 +      dma_resv_unlock(obj->resv);
 +
 +      return 0;
 +}
 +EXPORT_SYMBOL_IF_KUNIT(drm_gem_shmem_purge);
 +#endif
 +
  MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
  MODULE_IMPORT_NS("DMA_BUF");
- MODULE_LICENSE("GPL v2");
+ MODULE_LICENSE("GPL");
Simple merge
Simple merge
index 93119f0f23a921e35b9ad2957bc8c4ef2edac20f,99d681413effb95cf172b82e3f05afea59a5eefa..673ee71276e9b33282a22dc43f39c39073f70166
@@@ -207,16 -137,17 +207,17 @@@ update_logtype(struct pvr_device *pvr_d
        struct rogue_fwif_kccb_cmd cmd;
        int idx;
        int err;
+       int slot;
  
 -      if (group_mask)
 -              fw_trace->tracebuf_ctrl->log_type = ROGUE_FWIF_LOG_TYPE_TRACE | group_mask;
 -      else
 -              fw_trace->tracebuf_ctrl->log_type = ROGUE_FWIF_LOG_TYPE_NONE;
 +      /* No change in group_mask => nothing to update. */
 +      if (fw_trace->group_mask == group_mask)
 +              return 0;
  
        fw_trace->group_mask = group_mask;
 +      fw_trace->tracebuf_ctrl->log_type = build_log_type(group_mask);
  
        down_read(&pvr_dev->reset_sem);
 -      if (!drm_dev_enter(from_pvr_device(pvr_dev), &idx)) {
 +      if (!drm_dev_enter(drm_dev, &idx)) {
                err = -EIO;
                goto err_up_read;
        }
Simple merge
Simple merge
Simple merge
Simple merge
index ac9e4b6bd2ebc0b58f824ec1789b547e74cacb59,d7dc83cf7b00ac2e2e6ea3c34e56c6065bcbbfa8..ac7b1d12a0f59469a2182cbac47edd7fcb9a0ea9
@@@ -295,8 -294,8 +295,8 @@@ static int pl111_amba_probe(struct amba
        ret = devm_request_irq(dev, amba_dev->irq[0], pl111_irq, 0,
                               variant->name, priv);
        if (ret != 0) {
 -              dev_err(dev, "%s failed irq %d\n", __func__, ret);
 +              drm_err(drm, "%s failed irq %d\n", __func__, ret);
-               return ret;
+               goto dev_put;
        }
  
        ret = pl111_modeset_init(drm);
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 46e9c58f09e01683eab4a1a57fd24df034ed1d52,eb29e5309f0ab6a03a0b2c874e975c621b266550..2baf0861f78f30decb1f054dcb48c33def290832
@@@ -243,56 -209,19 +243,63 @@@ struct drm_pagemap_devmem_ops 
                           struct dma_fence *pre_migrate_fence);
  };
  
- #if IS_ENABLED(CONFIG_DRM_GPUSVM)
+ #if IS_ENABLED(CONFIG_ZONE_DEVICE)
 +int drm_pagemap_init(struct drm_pagemap *dpagemap,
 +                   struct dev_pagemap *pagemap,
 +                   struct drm_device *drm,
 +                   const struct drm_pagemap_ops *ops);
 +
 +struct drm_pagemap *drm_pagemap_create(struct drm_device *drm,
 +                                     struct dev_pagemap *pagemap,
 +                                     const struct drm_pagemap_ops *ops);
 +
+ struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page);
  
 +void drm_pagemap_put(struct drm_pagemap *dpagemap);
 +
  #else
  
- #endif /* IS_ENABLED(CONFIG_DRM_GPUSVM) */
+ static inline struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page)
+ {
+       return NULL;
+ }
 +static inline void drm_pagemap_put(struct drm_pagemap *dpagemap)
 +{
 +}
 +
+ #endif /* IS_ENABLED(CONFIG_ZONE_DEVICE) */
  
 +/**
 + * drm_pagemap_get() - Obtain a reference on a struct drm_pagemap
 + * @dpagemap: Pointer to the struct drm_pagemap, or NULL.
 + *
 + * Return: Pointer to the struct drm_pagemap, or NULL.
 + */
 +static inline struct drm_pagemap *
 +drm_pagemap_get(struct drm_pagemap *dpagemap)
 +{
 +      if (likely(dpagemap))
 +              kref_get(&dpagemap->ref);
 +
 +      return dpagemap;
 +}
 +
 +/**
 + * drm_pagemap_get_unless_zero() - Obtain a reference on a struct drm_pagemap
 + * unless the current reference count is zero.
 + * @dpagemap: Pointer to the drm_pagemap or NULL.
 + *
 + * Return: A pointer to @dpagemap if the reference count was successfully
 + * incremented. NULL if @dpagemap was NULL, or its refcount was 0.
 + */
 +static inline struct drm_pagemap * __must_check
 +drm_pagemap_get_unless_zero(struct drm_pagemap *dpagemap)
 +{
 +      return (dpagemap && kref_get_unless_zero(&dpagemap->ref)) ? dpagemap : NULL;
 +}
 +
  /**
   * struct drm_pagemap_devmem - Structure representing a GPU SVM device memory allocation
   *
@@@ -317,23 -246,8 +324,25 @@@ struct drm_pagemap_devmem 
        struct dma_fence *pre_migrate_fence;
  };
  
 +/**
 + * struct drm_pagemap_migrate_details - Details to govern migration.
 + * @timeslice_ms: The time requested for the migrated pagemap pages to
 + * be present in @mm before being allowed to be migrated back.
 + * @can_migrate_same_pagemap: Whether the copy function as indicated by
 + * the @source_peer_migrates flag, can migrate device pages within a
 + * single drm_pagemap.
 + * @source_peer_migrates: Whether on p2p migration, The source drm_pagemap
 + * should use the copy_to_ram() callback rather than the destination
 + * drm_pagemap should use the copy_to_devmem() callback.
 + */
 +struct drm_pagemap_migrate_details {
 +      unsigned long timeslice_ms;
 +      u32 can_migrate_same_pagemap : 1;
 +      u32 source_peer_migrates : 1;
 +};
 +
+ #if IS_ENABLED(CONFIG_ZONE_DEVICE)
  int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
                                  struct mm_struct *mm,
                                  unsigned long start, unsigned long end,
@@@ -356,7 -269,6 +363,10 @@@ int drm_pagemap_populate_mm(struct drm_
                            struct mm_struct *mm,
                            unsigned long timeslice_ms);
  
 +void drm_pagemap_destroy(struct drm_pagemap *dpagemap, bool is_atomic_or_reclaim);
 +
 +int drm_pagemap_reinit(struct drm_pagemap *dpagemap);
++
+ #endif /* IS_ENABLED(CONFIG_ZONE_DEVICE) */
  #endif