obj-$(CONFIG_DRM_GPUVM) += drm_gpuvm.o
drm_gpusvm_helper-y := \
- drm_gpusvm.o\
+ drm_gpusvm.o
+ drm_gpusvm_helper-$(CONFIG_ZONE_DEVICE) += \
- drm_pagemap.o
+ drm_pagemap.o\
+ drm_pagemap_util.o
+
obj-$(CONFIG_DRM_GPUSVM) += drm_gpusvm_helper.o
obj-$(CONFIG_DRM_BUDDY) += drm_buddy.o
unsigned long ref_clk_rate;
struct regmap *regm;
+ int main_irq;
unsigned long tmds_char_rate;
+ bool no_hpd;
};
static void dw_hdmi_qp_write(struct dw_hdmi_qp *hdmi, unsigned int val,
}
/**
- * drm_gem_shmem_init - Initialize an allocated object of the given size
+ * drm_gem_shmem_init - Initialize an allocated object.
* @dev: DRM device
- * @shmem: The allocated shmem GEM object.
+ * @shmem: shmem GEM object to initialize
- * @size: Size of the object to initialize
+ * @size: Buffer size in bytes
*
+ * This function initializes a caller-allocated shmem GEM object for a buffer of @size bytes.
+ *
* Returns:
* 0 on success, or a negative error code on failure.
*/
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_no_map);
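For reference, a minimal sketch of how a driver embedding struct drm_gem_shmem_object might use the initializer documented above; the my_gem type and my_gem_create() helper are illustrative assumptions, not part of this patch:

struct my_gem {
	struct drm_gem_shmem_object shmem;
	/* driver-private state would follow */
};

static struct my_gem *my_gem_create(struct drm_device *dev, size_t size)
{
	struct my_gem *bo;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	/* Initialize the caller-allocated shmem GEM object. */
	ret = drm_gem_shmem_init(dev, &bo->shmem, size);
	if (ret) {
		kfree(bo);
		return ERR_PTR(ret);
	}

	return bo;
}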
+/*
+ * KUnit helpers
+ */
+
+#if IS_ENABLED(CONFIG_KUNIT)
+int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, struct iosys_map *map)
+{
+ struct drm_gem_object *obj = &shmem->base;
+ int ret;
+
+ ret = dma_resv_lock_interruptible(obj->resv, NULL);
+ if (ret)
+ return ret;
+ ret = drm_gem_shmem_vmap_locked(shmem, map);
+ dma_resv_unlock(obj->resv);
+
+ return ret;
+}
+EXPORT_SYMBOL_IF_KUNIT(drm_gem_shmem_vmap);
+
+void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, struct iosys_map *map)
+{
+ struct drm_gem_object *obj = &shmem->base;
+
+ /* Not interruptible: vunmap cannot return an error, and the unconditional lock cannot fail. */
+ dma_resv_lock(obj->resv, NULL);
+ drm_gem_shmem_vunmap_locked(shmem, map);
+ dma_resv_unlock(obj->resv);
+}
+EXPORT_SYMBOL_IF_KUNIT(drm_gem_shmem_vunmap);
+
+int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
+{
+ struct drm_gem_object *obj = &shmem->base;
+ int ret;
+
+ ret = dma_resv_lock_interruptible(obj->resv, NULL);
+ if (ret)
+ return ret;
+ ret = drm_gem_shmem_madvise_locked(shmem, madv);
+ dma_resv_unlock(obj->resv);
+
+ return ret;
+}
+EXPORT_SYMBOL_IF_KUNIT(drm_gem_shmem_madvise);
+
+int drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
+{
+ struct drm_gem_object *obj = &shmem->base;
+ int ret;
+
+ ret = dma_resv_lock_interruptible(obj->resv, NULL);
+ if (ret)
+ return ret;
+ drm_gem_shmem_purge_locked(shmem);
+ dma_resv_unlock(obj->resv);
+
+ return 0;
+}
+EXPORT_SYMBOL_IF_KUNIT(drm_gem_shmem_purge);
+#endif
+
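These wrappers exist so KUnit tests can exercise the vmap/madvise/purge paths without open-coding dma_resv locking. A minimal sketch of a test using them, assuming the test's init hook has already created a shmem object and stored it in test->priv (an assumption for this example):

static void drm_gem_shmem_test_vmap(struct kunit *test)
{
	struct drm_gem_shmem_object *shmem = test->priv;	/* created in the test init hook (assumed) */
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
	int ret;

	/* The wrapper takes the object's dma_resv lock internally. */
	ret = drm_gem_shmem_vmap(shmem, &map);
	KUNIT_ASSERT_EQ(test, ret, 0);
	KUNIT_EXPECT_FALSE(test, iosys_map_is_null(&map));

	drm_gem_shmem_vunmap(shmem, &map);
}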
MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_IMPORT_NS("DMA_BUF");
- MODULE_LICENSE("GPL v2");
+ MODULE_LICENSE("GPL");
struct rogue_fwif_kccb_cmd cmd;
int idx;
int err;
+ int slot;
- if (group_mask)
- fw_trace->tracebuf_ctrl->log_type = ROGUE_FWIF_LOG_TYPE_TRACE | group_mask;
- else
- fw_trace->tracebuf_ctrl->log_type = ROGUE_FWIF_LOG_TYPE_NONE;
+ /* No change in group_mask => nothing to update. */
+ if (fw_trace->group_mask == group_mask)
+ return 0;
fw_trace->group_mask = group_mask;
+ fw_trace->tracebuf_ctrl->log_type = build_log_type(group_mask);
down_read(&pvr_dev->reset_sem);
- if (!drm_dev_enter(from_pvr_device(pvr_dev), &idx)) {
+ if (!drm_dev_enter(drm_dev, &idx)) {
err = -EIO;
goto err_up_read;
}
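The build_log_type() helper called above is added elsewhere in this change; based on the removed lines, a sketch of what it presumably does (a reconstruction under assumed types, not the actual helper):

/* Reconstruction only: actual helper is introduced elsewhere in this series. */
static u32 build_log_type(u32 group_mask)
{
	if (!group_mask)
		return ROGUE_FWIF_LOG_TYPE_NONE;

	return ROGUE_FWIF_LOG_TYPE_TRACE | group_mask;
}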
ret = devm_request_irq(dev, amba_dev->irq[0], pl111_irq, 0,
variant->name, priv);
if (ret != 0) {
- dev_err(dev, "%s failed irq %d\n", __func__, ret);
+ drm_err(drm, "%s failed to request IRQ, err %d\n", __func__, ret);
- return ret;
+ goto dev_put;
}
ret = pl111_modeset_init(drm);
struct dma_fence *pre_migrate_fence);
};
- #if IS_ENABLED(CONFIG_DRM_GPUSVM)
+ #if IS_ENABLED(CONFIG_ZONE_DEVICE)
+
+int drm_pagemap_init(struct drm_pagemap *dpagemap,
+ struct dev_pagemap *pagemap,
+ struct drm_device *drm,
+ const struct drm_pagemap_ops *ops);
+
+struct drm_pagemap *drm_pagemap_create(struct drm_device *drm,
+ struct dev_pagemap *pagemap,
+ const struct drm_pagemap_ops *ops);
+
+ struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page);
+void drm_pagemap_put(struct drm_pagemap *dpagemap);
+
#else
- #endif /* IS_ENABLED(CONFIG_DRM_GPUSVM) */
+ static inline struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page)
+ {
+ return NULL;
+ }
+
+static inline void drm_pagemap_put(struct drm_pagemap *dpagemap)
+{
+}
+
+ #endif /* IS_ENABLED(CONFIG_ZONE_DEVICE) */
+/**
+ * drm_pagemap_get() - Obtain a reference on a struct drm_pagemap
+ * @dpagemap: Pointer to the struct drm_pagemap, or NULL.
+ *
+ * Return: Pointer to the struct drm_pagemap, or NULL.
+ */
+static inline struct drm_pagemap *
+drm_pagemap_get(struct drm_pagemap *dpagemap)
+{
+ if (likely(dpagemap))
+ kref_get(&dpagemap->ref);
+
+ return dpagemap;
+}
+
+/**
+ * drm_pagemap_get_unless_zero() - Obtain a reference on a struct drm_pagemap
+ * unless the current reference count is zero.
+ * @dpagemap: Pointer to the drm_pagemap or NULL.
+ *
+ * Return: A pointer to @dpagemap if the reference count was successfully
+ * incremented. NULL if @dpagemap was NULL, or its refcount was 0.
+ */
+static inline struct drm_pagemap * __must_check
+drm_pagemap_get_unless_zero(struct drm_pagemap *dpagemap)
+{
+ return (dpagemap && kref_get_unless_zero(&dpagemap->ref)) ? dpagemap : NULL;
+}
+
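To illustrate the lifetime rules these helpers imply, a rough sketch of a driver creating a pagemap and managing references; my_devmem_pagemap_ops, my_priv() and the ERR_PTR-style error return of drm_pagemap_create() are assumptions for the example, not guaranteed by this header:

static int my_devmem_init(struct drm_device *drm, struct dev_pagemap *pagemap)
{
	struct drm_pagemap *dpagemap;

	dpagemap = drm_pagemap_create(drm, pagemap, &my_devmem_pagemap_ops);
	if (IS_ERR(dpagemap))	/* assuming an ERR_PTR-style error return */
		return PTR_ERR(dpagemap);

	/* A second user takes its own reference... */
	my_priv(drm)->dpagemap = drm_pagemap_get(dpagemap);

	/* ...and the creator's reference is dropped once ownership is handed over. */
	drm_pagemap_put(dpagemap);

	return 0;
}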
/**
* struct drm_pagemap_devmem - Structure representing a GPU SVM device memory allocation
*
struct dma_fence *pre_migrate_fence;
};
+/**
+ * struct drm_pagemap_migrate_details - Details to govern migration.
+ * @timeslice_ms: The time requested for the migrated pagemap pages to
+ * be present in @mm before being allowed to be migrated back.
+ * @can_migrate_same_pagemap: Whether the copy function, as indicated by
+ * the @source_peer_migrates flag, can migrate device pages within a
+ * single drm_pagemap.
+ * @source_peer_migrates: Whether, on p2p migration, the source drm_pagemap
+ * should use the copy_to_ram() callback rather than the destination
+ * drm_pagemap using its copy_to_devmem() callback.
+ */
+struct drm_pagemap_migrate_details {
+ unsigned long timeslice_ms;
+ u32 can_migrate_same_pagemap : 1;
+ u32 source_peer_migrates : 1;
+};
+
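As a usage illustration only, how a caller might fill in this structure; the concrete values and the call site that consumes it are assumptions for the example:

struct drm_pagemap_migrate_details details = {
	/* Keep migrated pages resident for at least 5 ms before allowing migration back. */
	.timeslice_ms = 5,
	/* The copy function cannot move device pages within a single drm_pagemap. */
	.can_migrate_same_pagemap = 0,
	/* On p2p migration, let the source drm_pagemap copy via copy_to_ram(). */
	.source_peer_migrates = 1,
};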
+ #if IS_ENABLED(CONFIG_ZONE_DEVICE)
+
int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
struct mm_struct *mm,
unsigned long start, unsigned long end,
struct mm_struct *mm,
unsigned long timeslice_ms);
+void drm_pagemap_destroy(struct drm_pagemap *dpagemap, bool is_atomic_or_reclaim);
+
+int drm_pagemap_reinit(struct drm_pagemap *dpagemap);
+
+ #endif /* IS_ENABLED(CONFIG_ZONE_DEVICE) */
+
#endif