if (intel_fbdev_fb_prefer_stolen(drm, size)) {
obj = xe_bo_create_pin_map_novm(xe, xe_device_get_root_tile(xe),
size,
- ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_FORCE_WC |
XE_BO_FLAG_STOLEN |
- XE_BO_FLAG_GGTT, false);
+ XE_BO_FLAG_GGTT,
+ false);
if (!IS_ERR(obj))
drm_info(&xe->drm, "Allocated fbdev into stolen\n");
else
if (IS_ERR(obj)) {
obj = xe_bo_create_pin_map_novm(xe, xe_device_get_root_tile(xe), size,
- ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_FORCE_WC |
XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
- XE_BO_FLAG_GGTT, false);
+ XE_BO_FLAG_GGTT,
+ false);
}
if (IS_ERR(obj)) {
if (ret)
goto err;
- if (!(bo->flags & XE_BO_FLAG_SCANOUT)) {
+ if (!(bo->flags & XE_BO_FLAG_FORCE_WC)) {
/*
- * XE_BO_FLAG_SCANOUT should ideally be set at creation, or is
+ * XE_BO_FLAG_FORCE_WC should ideally be set at creation, or is
* automatically set when creating FB. We cannot change caching
* mode when the bo is VM_BINDed, so we can only set
 * coherency with display when unbound.
 */
ret = -EINVAL;
goto err;
}
- bo->flags |= XE_BO_FLAG_SCANOUT;
+ bo->flags |= XE_BO_FLAG_FORCE_WC;
}
ttm_bo_unreserve(&bo->ttm);
return 0;
PAGE_ALIGN(size),
ttm_bo_type_kernel,
XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
- XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT, false);
+ XE_BO_FLAG_FORCE_WC |
+ XE_BO_FLAG_GGTT,
+ false);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
goto err_pin_map;
return 0;
/* We reject creating !SCANOUT fb's, so this is weird.. */
- drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_FLAG_SCANOUT));
+ drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_FLAG_FORCE_WC));
vma = __xe_pin_fb_vma(intel_fb, &new_plane_state->view.gtt, alignment);
if (plane_config->size == 0)
return NULL;
- flags = XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT;
+ flags = XE_BO_FLAG_FORCE_WC | XE_BO_FLAG_GGTT;
base = round_down(plane_config->base, page_size);
if (IS_DGFX(xe)) {
WARN_ON((bo->flags & XE_BO_FLAG_USER) && !bo->cpu_caching);
/*
- * Display scanout is always non-coherent with the CPU cache.
- *
* For Xe_LPG and beyond up to NVL-P (excluding), PPGTT PTE
* lookups are also non-coherent and require a CPU:WC mapping.
*/
- if ((!bo->cpu_caching && bo->flags & XE_BO_FLAG_SCANOUT) ||
- (!xe->info.has_cached_pt && bo->flags & XE_BO_FLAG_PAGETABLE))
+ if ((!bo->cpu_caching && bo->flags & XE_BO_FLAG_FORCE_WC) ||
+ (!xe->info.has_cached_pt && bo->flags & XE_BO_FLAG_PAGETABLE))
caching = ttm_write_combined;
}
if (args->flags & DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING)
bo_flags |= XE_BO_FLAG_DEFER_BACKING;
+ /*
+ * Display scanout is always non-coherent with the CPU cache.
+ */
if (args->flags & DRM_XE_GEM_CREATE_FLAG_SCANOUT)
- bo_flags |= XE_BO_FLAG_SCANOUT;
+ bo_flags |= XE_BO_FLAG_FORCE_WC;
if (args->flags & DRM_XE_GEM_CREATE_FLAG_NO_COMPRESSION) {
if (XE_IOCTL_DBG(xe, GRAPHICS_VER(xe) < 20))
/* CCS formats need physical placement at a 64K alignment in VRAM. */
if ((bo_flags & XE_BO_FLAG_VRAM_MASK) &&
- (bo_flags & XE_BO_FLAG_SCANOUT) &&
+ (args->flags & DRM_XE_GEM_CREATE_FLAG_SCANOUT) &&
!(xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) &&
IS_ALIGNED(args->size, SZ_64K))
bo_flags |= XE_BO_FLAG_NEEDS_64K;
args->cpu_caching != DRM_XE_GEM_CPU_CACHING_WC))
return -EINVAL;
- if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_SCANOUT &&
+ if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_FORCE_WC &&
args->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB))
return -EINVAL;
bo = xe_bo_create_user(xe, NULL, args->size,
DRM_XE_GEM_CPU_CACHING_WC,
XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
- XE_BO_FLAG_SCANOUT |
+ XE_BO_FLAG_FORCE_WC |
XE_BO_FLAG_NEEDS_CPU_ACCESS, NULL);
if (IS_ERR(bo))
return PTR_ERR(bo);
#define XE_BO_FLAG_PINNED BIT(7)
#define XE_BO_FLAG_NO_RESV_EVICT BIT(8)
#define XE_BO_FLAG_DEFER_BACKING BIT(9)
-#define XE_BO_FLAG_SCANOUT BIT(10)
+#define XE_BO_FLAG_FORCE_WC BIT(10)
#define XE_BO_FLAG_FIXED_PLACEMENT BIT(11)
#define XE_BO_FLAG_PAGETABLE BIT(12)
#define XE_BO_FLAG_NEEDS_CPU_ACCESS BIT(13)