i915_gem_stolen_node_offset(fbc->compressed_llb),
U32_MAX));
intel_de_write(display, FBC_CFB_BASE,
- i915_gem_stolen_node_address(i915, fbc->compressed_fb));
+ i915_gem_stolen_node_address(fbc->compressed_fb));
intel_de_write(display, FBC_LL_BASE,
- i915_gem_stolen_node_address(i915, fbc->compressed_llb));
+ i915_gem_stolen_node_address(fbc->compressed_llb));
}
static const struct intel_fbc_funcs i8xx_fbc_funcs = {
unsigned int size, int min_limit)
{
struct intel_display *display = fbc->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
u64 end = intel_fbc_stolen_end(display);
int ret, limit = min_limit;
size /= limit;
/* Try to over-allocate to reduce reallocations and fragmentation. */
- ret = i915_gem_stolen_insert_node_in_range(i915, fbc->compressed_fb,
+ ret = i915_gem_stolen_insert_node_in_range(fbc->compressed_fb,
size <<= 1, 4096, 0, end);
if (ret == 0)
return limit;
for (; limit <= intel_fbc_max_limit(display); limit <<= 1) {
- ret = i915_gem_stolen_insert_node_in_range(i915, fbc->compressed_fb,
+ ret = i915_gem_stolen_insert_node_in_range(fbc->compressed_fb,
size >>= 1, 4096, 0, end);
if (ret == 0)
return limit;
i915_gem_stolen_node_allocated(fbc->compressed_llb));
if (DISPLAY_VER(display) < 5 && !display->platform.g4x) {
- ret = i915_gem_stolen_insert_node(i915, fbc->compressed_llb,
- 4096, 4096);
+ ret = i915_gem_stolen_insert_node(fbc->compressed_llb, 4096, 4096);
if (ret)
goto err;
}
err_llb:
if (i915_gem_stolen_node_allocated(fbc->compressed_llb))
- i915_gem_stolen_remove_node(i915, fbc->compressed_llb);
+ i915_gem_stolen_remove_node(fbc->compressed_llb);
err:
if (i915_gem_stolen_initialized(i915))
drm_info_once(display->drm,
static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
{
- struct intel_display *display = fbc->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (WARN_ON(intel_fbc_hw_is_active(fbc)))
return;
if (i915_gem_stolen_node_allocated(fbc->compressed_llb))
- i915_gem_stolen_remove_node(i915, fbc->compressed_llb);
+ i915_gem_stolen_remove_node(fbc->compressed_llb);
if (i915_gem_stolen_node_allocated(fbc->compressed_fb))
- i915_gem_stolen_remove_node(i915, fbc->compressed_fb);
+ i915_gem_stolen_remove_node(fbc->compressed_fb);
}
void intel_fbc_cleanup(struct intel_display *display)
return ret;
}
-int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
- struct intel_stolen_node *node, u64 size,
+int i915_gem_stolen_insert_node_in_range(struct intel_stolen_node *node, u64 size,
unsigned int alignment, u64 start, u64 end)
{
- return __i915_gem_stolen_insert_node_in_range(i915, &node->node,
+ return __i915_gem_stolen_insert_node_in_range(node->i915, &node->node,
size, alignment,
start, end);
}
U64_MAX);
}
-int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
- struct intel_stolen_node *node, u64 size,
+int i915_gem_stolen_insert_node(struct intel_stolen_node *node, u64 size,
unsigned int alignment)
{
- return __i915_gem_stolen_insert_node(i915, &node->node, size, alignment);
+ return __i915_gem_stolen_insert_node(node->i915, &node->node, size, alignment);
}
static void __i915_gem_stolen_remove_node(struct drm_i915_private *i915,
mutex_unlock(&i915->mm.stolen_lock);
}
-void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
- struct intel_stolen_node *node)
+void i915_gem_stolen_remove_node(struct intel_stolen_node *node)
{
- __i915_gem_stolen_remove_node(i915, &node->node);
+ __i915_gem_stolen_remove_node(node->i915, &node->node);
}
static bool valid_stolen_size(struct drm_i915_private *i915, struct resource *dsm)
return resource_size(&i915->dsm.stolen);
}
-u64 i915_gem_stolen_node_address(const struct drm_i915_private *i915,
- const struct intel_stolen_node *node)
+u64 i915_gem_stolen_node_address(const struct intel_stolen_node *node)
{
+	const struct drm_i915_private *i915 = node->i915;
+
return i915->dsm.stolen.start + i915_gem_stolen_node_offset(node);
}
struct drm_i915_private;
struct intel_stolen_node;
-int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
- struct intel_stolen_node *node, u64 size,
+int i915_gem_stolen_insert_node(struct intel_stolen_node *node, u64 size,
unsigned alignment);
-int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
- struct intel_stolen_node *node, u64 size,
+int i915_gem_stolen_insert_node_in_range(struct intel_stolen_node *node, u64 size,
unsigned alignment, u64 start,
u64 end);
-void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
- struct intel_stolen_node *node);
+void i915_gem_stolen_remove_node(struct intel_stolen_node *node);
struct intel_memory_region *
i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type,
u16 instance);
u64 i915_gem_stolen_area_address(const struct drm_i915_private *i915);
u64 i915_gem_stolen_area_size(const struct drm_i915_private *i915);
-u64 i915_gem_stolen_node_address(const struct drm_i915_private *i915,
- const struct intel_stolen_node *node);
+u64 i915_gem_stolen_node_address(const struct intel_stolen_node *node);
bool i915_gem_stolen_node_allocated(const struct intel_stolen_node *node);
u64 i915_gem_stolen_node_offset(const struct intel_stolen_node *node);
struct intel_stolen_node;
struct xe_device;
-int i915_gem_stolen_insert_node_in_range(struct xe_device *xe,
- struct intel_stolen_node *node,
+int i915_gem_stolen_insert_node_in_range(struct intel_stolen_node *node,
u32 size, u32 align,
u32 start, u32 end);
-int i915_gem_stolen_insert_node(struct xe_device *xe,
- struct intel_stolen_node *node,
- u32 size, u32 align);
+int i915_gem_stolen_insert_node(struct intel_stolen_node *node, u32 size, u32 align);
-void i915_gem_stolen_remove_node(struct xe_device *xe,
- struct intel_stolen_node *node);
+void i915_gem_stolen_remove_node(struct intel_stolen_node *node);
bool i915_gem_stolen_initialized(struct xe_device *xe);
u64 i915_gem_stolen_area_size(const struct xe_device *xe);
-u64 i915_gem_stolen_node_address(struct xe_device *xe,
- struct intel_stolen_node *node);
+u64 i915_gem_stolen_node_address(struct intel_stolen_node *node);
u64 i915_gem_stolen_node_size(const struct intel_stolen_node *node);
struct xe_bo *bo;
};
-int i915_gem_stolen_insert_node_in_range(struct xe_device *xe,
- struct intel_stolen_node *node,
+int i915_gem_stolen_insert_node_in_range(struct intel_stolen_node *node,
u32 size, u32 align,
u32 start, u32 end)
{
+	struct xe_device *xe = node->xe;
struct xe_bo *bo;
int err = 0;
u32 flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_STOLEN;
return err;
}
-int i915_gem_stolen_insert_node(struct xe_device *xe,
- struct intel_stolen_node *node,
- u32 size, u32 align)
+int i915_gem_stolen_insert_node(struct intel_stolen_node *node, u32 size, u32 align)
{
/* Not used on xe */
WARN_ON(1);
return -ENODEV;
}
-void i915_gem_stolen_remove_node(struct xe_device *xe,
- struct intel_stolen_node *node)
+void i915_gem_stolen_remove_node(struct intel_stolen_node *node)
{
xe_bo_unpin_map_no_vm(node->bo);
node->bo = NULL;
return 0;
}
-u64 i915_gem_stolen_node_address(struct xe_device *xe,
- struct intel_stolen_node *node)
+u64 i915_gem_stolen_node_address(struct intel_stolen_node *node)
{
+ struct xe_device *xe = node->xe;
+
return xe_ttm_stolen_gpu_offset(xe) + i915_gem_stolen_node_offset(node);
}