From: Tvrtko Ursulin
Date: Tue, 18 Jan 2022 10:54:02 +0000 (+0000)
Subject: Merge drm/drm-next into drm-intel-gt-next
X-Git-Tag: v5.18-rc1~134^2~15^2^2~39
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=647bfd26bf054313305ea9c2c4a1c71f3bbfee63;p=thirdparty%2Fkernel%2Flinux.git

Merge drm/drm-next into drm-intel-gt-next

Maarten needs backmerge to account for header file renames/changes which
landed via drm-intel-next and are interfering with his pinning work.

Signed-off-by: Tvrtko Ursulin
---

647bfd26bf054313305ea9c2c4a1c71f3bbfee63
diff --cc drivers/gpu/drm/i915/display/intel_fbc.c
index 9208d52e3cecf,160fd2bdafe59..c0a973eeb4059
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@@ -259,168 -423,215 +423,215 @@@ static void g4x_fbc_deactivate(struct i
  	}
  }
- static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
+ static bool g4x_fbc_is_active(struct intel_fbc *fbc)
  {
- 	return intel_de_read(dev_priv, DPFC_CONTROL) & DPFC_CTL_EN;
+ 	return intel_de_read(fbc->i915, DPFC_CONTROL) & DPFC_CTL_EN;
  }
- static void i8xx_fbc_recompress(struct drm_i915_private *dev_priv)
+ static bool g4x_fbc_is_compressing(struct intel_fbc *fbc)
  {
- 	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
- 	enum i9xx_plane_id i9xx_plane = params->crtc.i9xx_plane;
+ 	return intel_de_read(fbc->i915, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
+ }
- 	spin_lock_irq(&dev_priv->uncore.lock);
- 	intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
- 			  intel_de_read_fw(dev_priv, DSPADDR(i9xx_plane)));
- 	spin_unlock_irq(&dev_priv->uncore.lock);
+ static void g4x_fbc_program_cfb(struct intel_fbc *fbc)
+ {
+ 	struct drm_i915_private *i915 = fbc->i915;
+ 
+ 	intel_de_write(i915, DPFC_CB_BASE, fbc->compressed_fb.start);
  }
- static void i965_fbc_recompress(struct drm_i915_private *dev_priv)
+ static const struct intel_fbc_funcs g4x_fbc_funcs = {
+ 	.activate = g4x_fbc_activate,
+ 	.deactivate = g4x_fbc_deactivate,
+ 	.is_active = g4x_fbc_is_active,
+ 	.is_compressing = g4x_fbc_is_compressing,
+ 	.nuke = i965_fbc_nuke,
+ 	.program_cfb = g4x_fbc_program_cfb,
+ };
+ 
+ static void ilk_fbc_activate(struct intel_fbc *fbc)
  {
- 	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
- 	enum i9xx_plane_id i9xx_plane = params->crtc.i9xx_plane;
+ 	struct intel_fbc_state *fbc_state = &fbc->state;
+ 	struct drm_i915_private *i915 = fbc->i915;
- 	spin_lock_irq(&dev_priv->uncore.lock);
- 	intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
- 			  intel_de_read_fw(dev_priv, DSPSURF(i9xx_plane)));
- 	spin_unlock_irq(&dev_priv->uncore.lock);
+ 	intel_de_write(i915, ILK_DPFC_FENCE_YOFF,
+ 		       fbc_state->fence_y_offset);
+ 
+ 	intel_de_write(i915, ILK_DPFC_CONTROL,
+ 		       DPFC_CTL_EN | g4x_dpfc_ctl(fbc));
  }
- /* This function forces a CFB recompression through the nuke operation. */
- static void snb_fbc_recompress(struct drm_i915_private *dev_priv)
+ static void ilk_fbc_deactivate(struct intel_fbc *fbc)
  {
- 	intel_de_write(dev_priv, MSG_FBC_REND_STATE, FBC_REND_NUKE);
- 	intel_de_posting_read(dev_priv, MSG_FBC_REND_STATE);
+ 	struct drm_i915_private *i915 = fbc->i915;
+ 	u32 dpfc_ctl;
+ 
+ 	/* Disable compression */
+ 	dpfc_ctl = intel_de_read(i915, ILK_DPFC_CONTROL);
+ 	if (dpfc_ctl & DPFC_CTL_EN) {
+ 		dpfc_ctl &= ~DPFC_CTL_EN;
+ 		intel_de_write(i915, ILK_DPFC_CONTROL, dpfc_ctl);
+ 	}
+ }
+ 
+ static bool ilk_fbc_is_active(struct intel_fbc *fbc)
+ {
+ 	return intel_de_read(fbc->i915, ILK_DPFC_CONTROL) & DPFC_CTL_EN;
  }
- static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
+ static bool ilk_fbc_is_compressing(struct intel_fbc *fbc)
  {
- 	struct intel_fbc *fbc = &dev_priv->fbc;
+ 	return intel_de_read(fbc->i915, ILK_DPFC_STATUS) & DPFC_COMP_SEG_MASK;
+ }
- 	trace_intel_fbc_nuke(fbc->crtc);
+ static void ilk_fbc_program_cfb(struct intel_fbc *fbc)
+ {
+ 	struct drm_i915_private *i915 = fbc->i915;
- 	if (DISPLAY_VER(dev_priv) >= 6)
- 		snb_fbc_recompress(dev_priv);
- 	else if (DISPLAY_VER(dev_priv) >= 4)
- 		i965_fbc_recompress(dev_priv);
- 	else
- 		i8xx_fbc_recompress(dev_priv);
+ 	intel_de_write(i915, ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
  }
- static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
+ static const struct intel_fbc_funcs ilk_fbc_funcs = {
+ 	.activate = ilk_fbc_activate,
+ 	.deactivate = ilk_fbc_deactivate,
+ 	.is_active = ilk_fbc_is_active,
+ 	.is_compressing = ilk_fbc_is_compressing,
+ 	.nuke = i965_fbc_nuke,
+ 	.program_cfb = ilk_fbc_program_cfb,
+ };
+ 
+ static void snb_fbc_program_fence(struct intel_fbc *fbc)
  {
- 	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
- 	u32 dpfc_ctl;
+ 	const struct intel_fbc_state *fbc_state = &fbc->state;
+ 	struct drm_i915_private *i915 = fbc->i915;
+ 	u32 ctl = 0;
- 	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane);
+ 	if (fbc_state->fence_id >= 0)
+ 		ctl = SNB_DPFC_FENCE_EN | SNB_DPFC_FENCENO(fbc_state->fence_id);
- 	dpfc_ctl |= g4x_dpfc_ctl_limit(dev_priv);
+ 	intel_de_write(i915, SNB_DPFC_CTL_SA, ctl);
+ 	intel_de_write(i915, SNB_DPFC_CPU_FENCE_OFFSET, fbc_state->fence_y_offset);
+ }
- 	if (params->fence_id >= 0) {
- 		dpfc_ctl |= DPFC_CTL_FENCE_EN;
- 		if (IS_IRONLAKE(dev_priv))
- 			dpfc_ctl |= params->fence_id;
- 		if (IS_SANDYBRIDGE(dev_priv)) {
- 			intel_de_write(dev_priv, SNB_DPFC_CTL_SA,
- 				       SNB_CPU_FENCE_ENABLE | params->fence_id);
- 			intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET,
- 				       params->fence_y_offset);
- 		}
- 	} else {
- 		if (IS_SANDYBRIDGE(dev_priv)) {
- 			intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0);
- 			intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0);
- 		}
- 	}
+ static void snb_fbc_activate(struct intel_fbc *fbc)
+ {
+ 	snb_fbc_program_fence(fbc);
- 	intel_de_write(dev_priv, ILK_DPFC_FENCE_YOFF,
- 		       params->fence_y_offset);
- 	/* enable it... */
- 	intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+ 	ilk_fbc_activate(fbc);
  }
- static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
+ static void snb_fbc_nuke(struct intel_fbc *fbc)
  {
- 	u32 dpfc_ctl;
+ 	struct drm_i915_private *i915 = fbc->i915;
- 	/* Disable compression */
- 	dpfc_ctl = intel_de_read(dev_priv, ILK_DPFC_CONTROL);
- 	if (dpfc_ctl & DPFC_CTL_EN) {
- 		dpfc_ctl &= ~DPFC_CTL_EN;
- 		intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl);
- 	}
+ 	intel_de_write(i915, MSG_FBC_REND_STATE, FBC_REND_NUKE);
+ 	intel_de_posting_read(i915, MSG_FBC_REND_STATE);
+ }
+ 
+ static const struct intel_fbc_funcs snb_fbc_funcs = {
+ 	.activate = snb_fbc_activate,
+ 	.deactivate = ilk_fbc_deactivate,
+ 	.is_active = ilk_fbc_is_active,
+ 	.is_compressing = ilk_fbc_is_compressing,
+ 	.nuke = snb_fbc_nuke,
+ 	.program_cfb = ilk_fbc_program_cfb,
+ };
+ 
+ static void glk_fbc_program_cfb_stride(struct intel_fbc *fbc)
+ {
+ 	const struct intel_fbc_state *fbc_state = &fbc->state;
+ 	struct drm_i915_private *i915 = fbc->i915;
+ 	u32 val = 0;
+ 
+ 	if (fbc_state->override_cfb_stride)
+ 		val |= FBC_STRIDE_OVERRIDE |
+ 			FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit);
+ 
+ 	intel_de_write(i915, GLK_FBC_STRIDE, val);
  }
- static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
+ static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc)
  {
- 	return intel_de_read(dev_priv, ILK_DPFC_CONTROL) & DPFC_CTL_EN;
+ 	const struct intel_fbc_state *fbc_state = &fbc->state;
+ 	struct drm_i915_private *i915 = fbc->i915;
+ 	u32 val = 0;
+ 
+ 	/* Display WA #0529: skl, kbl, bxt. */
+ 	if (fbc_state->override_cfb_stride)
+ 		val |= CHICKEN_FBC_STRIDE_OVERRIDE |
+ 			CHICKEN_FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit);
+ 
+ 	intel_de_rmw(i915, CHICKEN_MISC_4,
+ 		     CHICKEN_FBC_STRIDE_OVERRIDE |
+ 		     CHICKEN_FBC_STRIDE_MASK, val);
  }
- static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
+ static u32 ivb_dpfc_ctl(struct intel_fbc *fbc)
  {
- 	struct intel_fbc *fbc = &dev_priv->fbc;
- 	const struct intel_fbc_reg_params *params = &fbc->params;
+ 	const struct intel_fbc_state *fbc_state = &fbc->state;
+ 	struct drm_i915_private *i915 = fbc->i915;
  	u32 dpfc_ctl;
- 	if (DISPLAY_VER(dev_priv) >= 10) {
- 		u32 val = 0;
+ 	dpfc_ctl = g4x_dpfc_ctl_limit(fbc);
- 		if (params->override_cfb_stride)
- 			val |= FBC_STRIDE_OVERRIDE |
- 			       FBC_STRIDE(params->override_cfb_stride / fbc->limit);
+ 	if (IS_IVYBRIDGE(i915))
+ 		dpfc_ctl |= DPFC_CTL_PLANE_IVB(fbc_state->plane->i9xx_plane);
- 		intel_de_write(dev_priv, GLK_FBC_STRIDE, val);
- 	} else if (DISPLAY_VER(dev_priv) == 9) {
- 		u32 val = 0;
+ 	if (fbc_state->fence_id >= 0)
+ 		dpfc_ctl |= DPFC_CTL_FENCE_EN_IVB;
- 		/* Display WA #0529: skl, kbl, bxt. */
- 		if (params->override_cfb_stride)
- 			val |= CHICKEN_FBC_STRIDE_OVERRIDE |
- 				CHICKEN_FBC_STRIDE(params->override_cfb_stride / fbc->limit);
+ 	if (fbc->false_color)
+ 		dpfc_ctl |= DPFC_CTL_FALSE_COLOR;
- 		intel_de_rmw(dev_priv, CHICKEN_MISC_4,
- 			     CHICKEN_FBC_STRIDE_OVERRIDE |
- 			     CHICKEN_FBC_STRIDE_MASK, val);
- 	}
+ 	return dpfc_ctl;
+ }
- 	dpfc_ctl = 0;
- 	if (IS_IVYBRIDGE(dev_priv))
- 		dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.i9xx_plane);
- 
- 	dpfc_ctl |= g4x_dpfc_ctl_limit(dev_priv);
- 
- 	if (params->fence_id >= 0) {
- 		dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
- 		intel_de_write(dev_priv, SNB_DPFC_CTL_SA,
- 			       SNB_CPU_FENCE_ENABLE | params->fence_id);
- 		intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET,
- 			       params->fence_y_offset);
- 	} else if (to_gt(dev_priv)->ggtt->num_fences) {
- 		intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0);
- 		intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0);
- 	}
+ static void ivb_fbc_activate(struct intel_fbc *fbc)
+ {
+ 	struct drm_i915_private *i915 = fbc->i915;
+ 
+ 	if (DISPLAY_VER(i915) >= 10)
+ 		glk_fbc_program_cfb_stride(fbc);
+ 	else if (DISPLAY_VER(i915) == 9)
+ 		skl_fbc_program_cfb_stride(fbc);
- 	if (dev_priv->fbc.false_color)
- 		dpfc_ctl |= FBC_CTL_FALSE_COLOR;
 -	if (i915->ggtt.num_fences)
++	if (to_gt(i915)->ggtt->num_fences)
+ 		snb_fbc_program_fence(fbc);
- 	intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+ 	intel_de_write(i915, ILK_DPFC_CONTROL,
+ 		       DPFC_CTL_EN | ivb_dpfc_ctl(fbc));
  }
- static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
+ static bool ivb_fbc_is_compressing(struct intel_fbc *fbc)
  {
- 	if (DISPLAY_VER(dev_priv) >= 5)
- 		return ilk_fbc_is_active(dev_priv);
- 	else if (IS_GM45(dev_priv))
- 		return g4x_fbc_is_active(dev_priv);
- 	else
- 		return i8xx_fbc_is_active(dev_priv);
+ 	return intel_de_read(fbc->i915, ILK_DPFC_STATUS2) & DPFC_COMP_SEG_MASK_IVB;
  }
- static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
+ static void ivb_fbc_set_false_color(struct intel_fbc *fbc,
+ 				    bool enable)
  {
- 	struct intel_fbc *fbc = &dev_priv->fbc;
+ 	intel_de_rmw(fbc->i915, ILK_DPFC_CONTROL,
+ 		     DPFC_CTL_FALSE_COLOR, enable ? DPFC_CTL_FALSE_COLOR : 0);
+ }
- 	trace_intel_fbc_activate(fbc->crtc);
+ static const struct intel_fbc_funcs ivb_fbc_funcs = {
+ 	.activate = ivb_fbc_activate,
+ 	.deactivate = ilk_fbc_deactivate,
+ 	.is_active = ilk_fbc_is_active,
+ 	.is_compressing = ivb_fbc_is_compressing,
+ 	.nuke = snb_fbc_nuke,
+ 	.program_cfb = ilk_fbc_program_cfb,
+ 	.set_false_color = ivb_fbc_set_false_color,
+ };
+ 
+ static bool intel_fbc_hw_is_active(struct intel_fbc *fbc)
+ {
+ 	return fbc->funcs->is_active(fbc);
+ }
+ 
+ static void intel_fbc_hw_activate(struct intel_fbc *fbc)
+ {
+ 	trace_intel_fbc_activate(fbc->state.plane);
  	fbc->active = true;
  	fbc->activated = true;
diff --cc drivers/gpu/drm/i915/gt/intel_ggtt.c
index 91ea2882efda8,5263dda7f8d52..a1b2761bc16e1
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@@ -1257,7 -1280,7 +1284,7 @@@ bool i915_ggtt_resume_vm(struct i915_ad
  			atomic_read(&vma->flags) & I915_VMA_BIND_MASK;
  
  		GEM_BUG_ON(!was_bound);
- 		vma->ops->bind_vma(&ggtt->vm, NULL, vma->resource,
 -		vma->ops->bind_vma(vm, NULL, vma,
++		vma->ops->bind_vma(vm, NULL, vma->resource,
  				   obj ? obj->cache_level : 0,
  				   was_bound);
  		if (obj) { /* only used during resume => exclusive access */
diff --cc drivers/gpu/drm/i915/gt/intel_gt.c
index 3ebf8136e44bd,f98f0fb21efbe..298ff32c8d0c1
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@@ -3,7 -3,7 +3,8 @@@
   * Copyright © 2019 Intel Corporation
   */
  
 +#include
+ #include
  
  #include "intel_gt_debugfs.h"
diff --cc drivers/gpu/drm/i915/i915_driver.c
index d61b4a3016f8c,95174938b1602..5f2343389b5e9
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@@ -1144,7 -1144,9 +1146,9 @@@ static int i915_drm_suspend(struct drm_
  	intel_suspend_hw(dev_priv);
  
+ 	/* Must be called before GGTT is suspended. */
+ 	intel_dpt_suspend(dev_priv);
 -	i915_ggtt_suspend(&dev_priv->ggtt);
 +	i915_ggtt_suspend(to_gt(dev_priv)->ggtt);
  
  	i915_save_display(dev_priv);
@@@ -1259,7 -1270,9 +1272,9 @@@ static int i915_drm_resume(struct drm_d
  	if (ret)
  		drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");
  
 -	i915_ggtt_resume(&dev_priv->ggtt);
 +	i915_ggtt_resume(to_gt(dev_priv)->ggtt);
+ 	/* Must be called after GGTT is resumed. */
+ 	intel_dpt_resume(dev_priv);
  
  	intel_dmc_ucode_resume(dev_priv);
diff --cc drivers/gpu/drm/i915/i915_vma_types.h
index 5b7da0ee5d640,ca575e129ced6..88370dadca820
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@@ -95,15 -95,22 +95,24 @@@ enum i915_cache_level
   *
   */
  
 +struct i915_vma_resource;
 +
  struct intel_remapped_plane_info {
  	/* in gtt pages */
- 	u32 offset;
- 	u16 width;
- 	u16 height;
- 	u16 src_stride;
- 	u16 dst_stride;
+ 	u32 offset:31;
+ 	u32 linear:1;
+ 	union {
+ 		/* in gtt pages for !linear */
+ 		struct {
+ 			u16 width;
+ 			u16 height;
+ 			u16 src_stride;
+ 			u16 dst_stride;
+ 		};
+ 
+ 		/* in gtt pages for linear */
+ 		u32 size;
+ 	};
  } __packed;
  
  struct intel_remapped_info {