Merge tag 'drm-intel-next-2018-09-06-2' of git://anongit.freedesktop.org/drm/drm...
author Dave Airlie <airlied@redhat.com>
Tue, 11 Sep 2018 01:52:54 +0000 (11:52 +1000)
committer Dave Airlie <airlied@redhat.com>
Tue, 11 Sep 2018 01:53:12 +0000 (11:53 +1000)
Merge tag 'gvt-next-2018-09-04'
drm-intel-next-2018-09-06-1:
UAPI Changes:
- GGTT coherency GETPARAM: GGTT has turned out to be non-coherent for some
  platforms, which we've failed to communicate to userspace so far. SNA was
  modified to do extra flushing on non-coherent GGTT access, while Mesa will
  mitigate by always requiring WC mapping (which is non-coherent anyway). A
  userspace probe sketch follows this list.
- Neuter Resource Streamer uAPI: There never really were users for the feature,
  so neuter it while keeping the interface bits for compatibility. This is a
  long-overdue item from the past.
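
For reference, a minimal userspace sketch of probing the new coherency
parameter. It assumes an i915_drm.h that already carries
I915_PARAM_MMAP_GTT_COHERENT (added by this series) and the usual render
node path; on older kernels the ioctl fails and callers should assume
non-coherent GGTT:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <i915_drm.h>   /* libdrm headers; build with -I/usr/include/libdrm */

int main(void)
{
        int coherent = 0;
        struct drm_i915_getparam gp = {
                .param = I915_PARAM_MMAP_GTT_COHERENT,
                .value = &coherent,
        };
        int fd = open("/dev/dri/renderD128", O_RDWR); /* adjust node as needed */

        if (fd < 0)
                return 1;
        if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
                coherent = 0;   /* old kernel: parameter unknown, assume worst */
        printf("GGTT mmaps %s coherent\n", coherent ? "are" : "are NOT");
        close(fd);
        return 0;
}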

Cross-subsystem Changes:
- Backmerge of branch drm-next-4.19 for DP_DPCD_REV_14 changes

Core Changes:
- None

Driver Changes:

- A load of Icelake (ICL) enabling patches (Paulo, Manasi)
- Enabled full PPGTT for IVB, VLV and HSW (Chris)
- Bugzilla #107113: Distribute DDB based on display resolutions (Mahesh)
- Bugzillas #100023,#107476,#94921: Support limited range DP displays (Jani)
- Bugzilla #107503: Increase LSPCON timeout (Fredrik)
- Avoid boosting GPU due to an occasional stall in interactive workloads (Chris)
- Apply GGTT coherency W/A only for affected systems instead of all (Chris)
- Fix for infinite link training loop for faulty USB-C MST hubs (Nathan)
- Keep KMS functional on Gen4 and earlier when GPU is wedged (Chris)
- Stop holding ppGTT reference from closed VMAs (Chris)
- Clear error registers after error capture (Lionel)
- Various Icelake fixes (Anusha, Jyoti, Ville, Tvrtko)
- Add missing Coffeelake (CFL) PCI IDs (Rodrigo)
- Flush execlists tasklet directly from reset-finish (Chris)
- Fix LPE audio runtime PM (Chris)
- Fix detection of out of range surface positions (GLK/CNL) (Ville)
- Remove wait-for-idle for PSR2 (Dhinakaran)
- Power down existing display hardware resources when display is disabled (Chris)
- Don't allow runtime power management if RC6 doesn't exist (Chris)
- Add debugging checks for runtime power management paths (Imre)
- Increase symmetry in display power init/fini paths (Imre)
- Isolate GVT specific macros from i915_reg.h (Lucas)
- Increase symmetry in power management enable/disable paths (Chris)
- Increase IP disable timeout to 100 ms to avoid DRM_ERROR (Imre)
- Fix memory leak from HDMI HDCP write function (Brian, Rodrigo)
- Reject Y/Yf tiling on interlaced modes (Ville)
- Use a cached mapping for the physical HWS on older gens (Chris)
- Force slow path of writing relocations to buffer if unable to write to userspace (Chris)
- Do a full device reset after being wedged (Chris)
- Keep forcewake counts over reset (in case of debugfs user) (Imre, Chris)
- Avoid false-positive errors from power wells during init (Imre)
- Reset engines forcibly instead of declaring the whole device wedged (Mika)
- Reduce context HW ID lifetime in preparation for Icelake (Chris)
- Attempt to recover from module load failures (Chris)
- Keep select interrupts over a reset to avoid missing/losing them (Chris)
- GuC submission backend improvements (Jakub)
- Terminate context images with BB_END (Chris, Lionel)
- Make GCC evaluate GGTT view struct size assertions again (Ville)
- Add selftest to exercise suspend/hibernate code-paths for GEM (Chris)
- Use a full emulation of a user ppgtt context in selftests (Chris)
- Exercise resetting in the middle of a wait-on-fence in selftests (Chris)
- Fix coherency issues on selftests for Baytrail (Chris)
- Various other GEM fixes / self-test updates (Chris, Matt)
- GuC doorbell self-tests (Daniele)
- PSR mode control through debugfs for IGTs (Maarten)
- Degrade expected WM latency errors to DRM_DEBUG_KMS (Chris)
- Cope with errors better in MST link training (Dhinakaran)
- Fix WARN on KBL external displays (Azhar)
- Power well code cleanups (Imre)
- Fixes to PSR debugging (Dhinakaran)
- Make forcewake errors louder for easier catching in CI (WARNs) (Chris)
- Fortify tiling code against programmer errors (Chris)
- Bunch of fixes for CI exposed corner cases (multiple authors, mostly Chris)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180907105446.GA22860@jlahtine-desk.ger.corp.intel.com
87 files changed:
drivers/gpu/drm/i915/Kconfig.debug
drivers/gpu/drm/i915/gvt/cfg_space.c
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/gvt/display.c
drivers/gpu/drm/i915/gvt/edid.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/gvt.c
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/mmio.c
drivers/gpu/drm/i915/gvt/mmio_context.c
drivers/gpu/drm/i915/gvt/mmio_context.h
drivers/gpu/drm/i915/gvt/opregion.c
drivers/gpu/drm/i915/gvt/page_track.c
drivers/gpu/drm/i915/gvt/reg.h
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem.h
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_context.h
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_gtt.h
drivers/gpu/drm/i915/i915_gem_object.h
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_pci.c
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/i915/i915_request.h
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/i915_vma.h
drivers/gpu/drm/i915/intel_atomic_plane.c
drivers/gpu/drm/i915/intel_breadcrumbs.c
drivers/gpu/drm/i915/intel_csr.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_device_info.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_display.h
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_link_training.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_dpll_mgr.c
drivers/gpu/drm/i915/intel_dpll_mgr.h
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_guc.c
drivers/gpu/drm/i915/intel_guc.h
drivers/gpu/drm/i915/intel_guc_ads.c
drivers/gpu/drm/i915/intel_guc_ct.c
drivers/gpu/drm/i915/intel_guc_fwif.h
drivers/gpu/drm/i915/intel_guc_log.c
drivers/gpu/drm/i915/intel_guc_submission.c
drivers/gpu/drm/i915/intel_guc_submission.h
drivers/gpu/drm/i915/intel_hangcheck.c
drivers/gpu/drm/i915/intel_hdcp.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_huc.c
drivers/gpu/drm/i915/intel_i2c.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lrc.h
drivers/gpu/drm/i915/intel_lrc_reg.h
drivers/gpu/drm/i915/intel_mocs.c
drivers/gpu/drm/i915/intel_mocs.h
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_psr.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_uc_fw.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/i915/intel_wopcm.c
drivers/gpu/drm/i915/selftests/huge_pages.c
drivers/gpu/drm/i915/selftests/i915_gem.c [new file with mode: 0644]
drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
drivers/gpu/drm/i915/selftests/i915_gem_object.c
drivers/gpu/drm/i915/selftests/i915_live_selftests.h
drivers/gpu/drm/i915/selftests/intel_guc.c
drivers/gpu/drm/i915/selftests/intel_hangcheck.c
drivers/gpu/drm/i915/selftests/mock_context.c
drivers/gpu/drm/i915/selftests/mock_gtt.c
include/drm/i915_pciids.h
include/uapi/drm/i915_drm.h

index 459f8f88a34cda0747ca37e7ccf43c4ca64fe283..9e36ffb5eb7cdd049140ab9e4eeccc53c1020813 100644 (file)
@@ -30,6 +30,7 @@ config DRM_I915_DEBUG
        select SW_SYNC # signaling validation framework (igt/syncobj*)
        select DRM_I915_SW_FENCE_DEBUG_OBJECTS
        select DRM_I915_SELFTEST
+       select DRM_I915_DEBUG_RUNTIME_PM
         default n
         help
           Choose this option to turn on extra driver debugging that may affect
@@ -167,3 +168,14 @@ config DRM_I915_DEBUG_VBLANK_EVADE
          the vblank.
 
          If in doubt, say "N".
+
+config DRM_I915_DEBUG_RUNTIME_PM
+       bool "Enable extra state checking for runtime PM"
+       depends on DRM_I915
+       default n
+       help
+         Choose this option to turn on extra state checking for the
+         runtime PM functionality. This may introduce overhead during
+         driver loading, suspend and resume operations.
+
+         If in doubt, say "N".
index c62346fdc05d5f241bda610fa7b4ceb901b75553..19cf1bbe059d49018d13e274c8243c4203667320 100644 (file)
@@ -56,6 +56,10 @@ static const u8 pci_cfg_space_rw_bmp[PCI_INTERRUPT_LINE + 4] = {
 
 /**
  * vgpu_pci_cfg_mem_write - write virtual cfg space memory
+ * @vgpu: target vgpu
+ * @off: offset
+ * @src: src ptr to write
+ * @bytes: number of bytes
  *
  * Use this function to write virtual cfg space memory.
  * For standard cfg space, only RW bits can be changed,
@@ -91,6 +95,10 @@ static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
 
 /**
  * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
+ * @vgpu: target vgpu
+ * @offset: offset
+ * @p_data: return data ptr
+ * @bytes: number of bytes to read
  *
  * Returns:
  * Zero on success, negative error code if failed.
@@ -278,6 +286,10 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
 
 /**
  * intel_vgpu_emulate_cfg_write - emulate vGPU configuration space write
+ * @vgpu: target vgpu
+ * @offset: offset
+ * @p_data: write data ptr
+ * @bytes: number of bytes to write
  *
  * Returns:
  * Zero on success, negative error code if failed.
index a614db310ea276a5deca674363c09db44e79ac79..77edbfcb0f75ea28b9ee3ffe51de137fc4f8743f 100644 (file)
@@ -1840,6 +1840,8 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
        return ret;
 }
 
+static int mi_noop_index;
+
 static struct cmd_info cmd_info[] = {
        {"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
 
@@ -2525,7 +2527,12 @@ static int cmd_parser_exec(struct parser_exec_state *s)
 
        cmd = cmd_val(s, 0);
 
-       info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+       /* fastpath for MI_NOOP */
+       if (cmd == MI_NOOP)
+               info = &cmd_info[mi_noop_index];
+       else
+               info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+
        if (info == NULL) {
                gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
                                cmd, get_opcode(cmd, s->ring_id),
@@ -2928,6 +2935,8 @@ static int init_cmd_table(struct intel_gvt *gvt)
                        kfree(e);
                        return -EEXIST;
                }
+               if (cmd_info[i].opcode == OP_MI_NOOP)
+                       mi_noop_index = i;
 
                INIT_HLIST_NODE(&e->hlist);
                add_cmd_entry(gvt, e);
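
The hunk above caches the cmd_info[] slot of MI_NOOP (a command dword of all
zeroes, and by far the most frequent command in a guest ring) at table-init
time, so the per-command hash lookup can be skipped on the hot path. A
standalone sketch of the same caching pattern, with simplified stand-ins for
the kernel's structures:

#include <stddef.h>
#include <stdio.h>

struct cmd_info { unsigned int opcode; const char *name; };

static struct cmd_info cmd_table[] = {
        { 0x0,  "MI_NOOP" },
        { 0x14, "SOME_OTHER_CMD" },     /* illustrative entry */
};
static int mi_noop_index;               /* filled once at init time */

static void init_cmd_table(void)
{
        for (size_t i = 0; i < sizeof(cmd_table) / sizeof(cmd_table[0]); i++)
                if (cmd_table[i].opcode == 0x0)
                        mi_noop_index = (int)i;
}

static const struct cmd_info *lookup(unsigned int cmd)
{
        if (cmd == 0x0)                 /* MI_NOOP fast path */
                return &cmd_table[mi_noop_index];
        /* the kernel walks a hash table here; a linear scan suffices
         * for the sketch */
        for (size_t i = 0; i < sizeof(cmd_table) / sizeof(cmd_table[0]); i++)
                if (cmd_table[i].opcode == cmd)
                        return &cmd_table[i];
        return NULL;
}

int main(void)
{
        init_cmd_table();
        printf("%s\n", lookup(0x0)->name);      /* MI_NOOP via cached index */
        return 0;
}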
index 3019dbc39aef22573fa04b7120fc9ad021d83c15..df1e14145747ca9666dae9c8bdf7273cc69c4b46 100644 (file)
@@ -462,6 +462,7 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
 /**
  * intel_vgpu_init_display - initialize vGPU virtual display emulation
  * @vgpu: a vGPU
+ * @resolution: resolution index for intel_vgpu_edid
  *
  * This function is used to initialize vGPU virtual display emulation stuff
  *
index 4b98539025c5b51014f9d515285f6c1f90d8cb59..5d4bb35bb889e932c6b2905ac06e4495d57b0cb4 100644 (file)
@@ -340,6 +340,9 @@ static int gmbus2_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 /**
  * intel_gvt_i2c_handle_gmbus_read - emulate gmbus register mmio read
  * @vgpu: a vGPU
+ * @offset: reg offset
+ * @p_data: data return buffer
+ * @bytes: access data length
  *
  * This function is used to emulate gmbus register mmio read
  *
@@ -365,6 +368,9 @@ int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
 /**
  * intel_gvt_i2c_handle_gmbus_write - emulate gmbus register mmio write
  * @vgpu: a vGPU
+ * @offset: reg offset
+ * @p_data: data return buffer
+ * @bytes: access data length
  *
  * This function is used to emulate gmbus register mmio write
  *
@@ -437,6 +443,9 @@ static inline int get_aux_ch_reg(unsigned int offset)
 /**
  * intel_gvt_i2c_handle_aux_ch_write - emulate AUX channel register write
  * @vgpu: a vGPU
+ * @port_idx: port index
+ * @offset: reg offset
+ * @p_data: write ptr
  *
  * This function is used to emulate AUX channel register write
  *
index 00aad8164dec2037f8fc8709298aa3dba2c8c0fa..2402395a068da2fc5e83ba988aa76afccfe09f03 100644 (file)
@@ -1113,6 +1113,10 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
 }
 
 /**
+ * Check whether a 2M page can be used
+ * @vgpu: target vgpu
+ * @entry: target pfn's gtt entry
+ *
  * Return 1 if 2MB huge gtt shadowing is possible, 0 if not,
  * negative if an error is found.
  */
@@ -1945,7 +1949,7 @@ void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
 
 /**
  * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
- * @vgpu: a vGPU
+ * @mm: target vgpu mm
  *
  * This function is called when user wants to use a vGPU mm object. If this
  * mm object hasn't been shadowed yet, the shadow will be populated at this
@@ -2521,8 +2525,7 @@ fail:
 /**
  * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
  * @vgpu: a vGPU
- * @page_table_level: PPGTT page table level
- * @root_entry: PPGTT page table root pointers
+ * @pdps: pdp root array
  *
  * This function is used to find a PPGTT mm object from mm object pool
  *
index 46c8b720e336317f5fdf1ce51553031112513295..6ef5a7fc70df967b2de542da23501dd5d0f1369b 100644 (file)
@@ -189,7 +189,6 @@ static const struct intel_gvt_ops intel_gvt_ops = {
 
 /**
  * intel_gvt_init_host - Load MPT modules and detect if we're running in host
- * @gvt: intel gvt device
  *
  * This function is called at the driver loading stage. If failed to find a
  * loadable MPT module or detect currently we're running in a VM, then GVT-g
@@ -303,7 +302,7 @@ static int init_service_thread(struct intel_gvt *gvt)
 
 /**
  * intel_gvt_clean_device - clean a GVT device
- * @gvt: intel gvt device
+ * @dev_priv: i915 private
  *
  * This function is called at the driver unloading stage, to free the
  * resources owned by a GVT device.
index 7a58ca5551977a086ce8b25dbe18aa26497f45bc..d26258786e3ff90ed959ca7aab253b7939594a08 100644 (file)
@@ -1287,12 +1287,13 @@ static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
 {
        write_vreg(vgpu, offset, p_data, bytes);
 
-       if (vgpu_vreg(vgpu, offset) & HSW_PWR_WELL_CTL_REQ(HSW_DISP_PW_GLOBAL))
+       if (vgpu_vreg(vgpu, offset) &
+           HSW_PWR_WELL_CTL_REQ(HSW_PW_CTL_IDX_GLOBAL))
                vgpu_vreg(vgpu, offset) |=
-                       HSW_PWR_WELL_CTL_STATE(HSW_DISP_PW_GLOBAL);
+                       HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL);
        else
                vgpu_vreg(vgpu, offset) &=
-                       ~HSW_PWR_WELL_CTL_STATE(HSW_DISP_PW_GLOBAL);
+                       ~HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL);
        return 0;
 }
 
@@ -2118,7 +2119,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 
        MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read,
                gmbus_mmio_write);
-       MMIO_F(PCH_GPIOA, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL);
+       MMIO_F(PCH_GPIO_BASE, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL);
        MMIO_F(_MMIO(0xe4f00), 0x28, 0, 0, 0, D_ALL, NULL, NULL);
 
        MMIO_F(_MMIO(_PCH_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
@@ -2443,17 +2444,10 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
        MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL);
        MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL);
        MMIO_D(GEN6_PMINTRMSK, D_ALL);
-       /*
-        * Use an arbitrary power well controlled by the PWR_WELL_CTL
-        * register.
-        */
-       MMIO_DH(HSW_PWR_WELL_CTL_BIOS(HSW_DISP_PW_GLOBAL), D_BDW, NULL,
-               power_well_ctl_mmio_write);
-       MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(HSW_DISP_PW_GLOBAL), D_BDW, NULL,
-               power_well_ctl_mmio_write);
-       MMIO_DH(HSW_PWR_WELL_CTL_KVMR, D_BDW, NULL, power_well_ctl_mmio_write);
-       MMIO_DH(HSW_PWR_WELL_CTL_DEBUG(HSW_DISP_PW_GLOBAL), D_BDW, NULL,
-               power_well_ctl_mmio_write);
+       MMIO_DH(HSW_PWR_WELL_CTL1, D_BDW, NULL, power_well_ctl_mmio_write);
+       MMIO_DH(HSW_PWR_WELL_CTL2, D_BDW, NULL, power_well_ctl_mmio_write);
+       MMIO_DH(HSW_PWR_WELL_CTL3, D_BDW, NULL, power_well_ctl_mmio_write);
+       MMIO_DH(HSW_PWR_WELL_CTL4, D_BDW, NULL, power_well_ctl_mmio_write);
        MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write);
        MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write);
 
@@ -2804,13 +2798,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        MMIO_F(_MMIO(_DPD_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
                                                dp_aux_ch_ctl_mmio_write);
 
-       /*
-        * Use an arbitrary power well controlled by the PWR_WELL_CTL
-        * register.
-        */
-       MMIO_D(HSW_PWR_WELL_CTL_BIOS(SKL_DISP_PW_MISC_IO), D_SKL_PLUS);
-       MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL,
-               skl_power_well_ctl_write);
+       MMIO_D(HSW_PWR_WELL_CTL1, D_SKL_PLUS);
+       MMIO_DH(HSW_PWR_WELL_CTL2, D_SKL_PLUS, NULL, skl_power_well_ctl_write);
 
        MMIO_D(_MMIO(0xa210), D_SKL_PLUS);
        MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
@@ -3434,6 +3423,7 @@ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
  * @offset: register offset
  * @pdata: data buffer
  * @bytes: data length
+ * @is_read: read or write
  *
  * Returns:
  * Zero on success, negative error code if failed.
index a45f46d8537f15bd187fd4195d7de2c6b6dffd2a..71751be329e3c58c323feae1c774696ff87aec9c 100644 (file)
@@ -1712,7 +1712,7 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
        return pfn;
 }
 
-int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
+static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
                unsigned long size, dma_addr_t *dma_addr)
 {
        struct kvmgt_guest_info *info;
@@ -1761,7 +1761,7 @@ static void __gvt_dma_release(struct kref *ref)
        __gvt_cache_remove_entry(entry->vgpu, entry);
 }
 
-void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr)
+static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr)
 {
        struct kvmgt_guest_info *info;
        struct gvt_dma *entry;
index 994366035364b7576db8ed2ec1036d417cc39d9c..4db817c21ed89e0dcbc6cde8acc511d80ddcf805 100644 (file)
@@ -39,6 +39,7 @@
 /**
  * intel_vgpu_gpa_to_mmio_offset - translate a GPA to MMIO offset
  * @vgpu: a vGPU
+ * @gpa: guest physical address
  *
  * Returns:
  * Zero on success, negative error code if failed
@@ -228,7 +229,7 @@ out:
 /**
  * intel_vgpu_reset_mmio - reset virtual MMIO space
  * @vgpu: a vGPU
- *
+ * @dmlr: whether this is device model level reset
  */
 void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
 {
index 42e1e6bdcc2cfe64a3446eea8019b9a912141ba0..7e702c6a32af58acf1596be64fe92c0a1245b936 100644 (file)
 #include "gvt.h"
 #include "trace.h"
 
-/**
- * Defined in Intel Open Source PRM.
- * Ref: https://01.org/linuxgraphics/documentation/hardware-specification-prms
- */
-#define TRVATTL3PTRDW(i)       _MMIO(0x4de0 + (i)*4)
-#define TRNULLDETCT            _MMIO(0x4de8)
-#define TRINVTILEDETCT         _MMIO(0x4dec)
-#define TRVADR                 _MMIO(0x4df0)
-#define TRTTE                  _MMIO(0x4df4)
-#define RING_EXCC(base)                _MMIO((base) + 0x28)
-#define RING_GFX_MODE(base)    _MMIO((base) + 0x29c)
-#define VF_GUARDBAND           _MMIO(0x83a4)
-
 #define GEN9_MOCS_SIZE         64
 
 /* Raw offset is appended to each line for convenience. */
index 5c3b9ff9f96aa979edebd277a8b000715a3a7d72..f7eaa442403f7177824ac8f10c47cf0de5c07d85 100644 (file)
@@ -53,5 +53,8 @@ bool is_inhibit_context(struct intel_context *ce);
 
 int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
                                       struct i915_request *req);
+#define IS_RESTORE_INHIBIT(a)  \
+       (_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) == \
+       ((a) & _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT)))
 
 #endif
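
For context on the new macro: i915 "masked" registers keep the write-enable
mask in the upper 16 bits and the values in the lower 16, and
_MASKED_BIT_ENABLE() in i915_reg.h effectively sets a bit in both halves.
IS_RESTORE_INHIBIT() is therefore true only when
CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (bit 0 of the saved CTX_CONTROL) is both
unmasked and set. A small self-contained check of that logic, using a
simplified macro rather than i915's exact typeof-wrapped definition:

#include <assert.h>
#include <stdint.h>

/* Equivalent of i915's _MASKED_BIT_ENABLE(): mask half + value half. */
#define MASKED_BIT_ENABLE(a)    (((a) << 16) | (a))
#define RESTORE_INHIBIT         (1u << 0) /* CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT */

static int is_restore_inhibit(uint32_t ctx_ctrl)
{
        return (ctx_ctrl & MASKED_BIT_ENABLE(RESTORE_INHIBIT)) ==
                MASKED_BIT_ENABLE(RESTORE_INHIBIT);
}

int main(void)
{
        assert(is_restore_inhibit(MASKED_BIT_ENABLE(RESTORE_INHIBIT)));
        assert(!is_restore_inhibit(RESTORE_INHIBIT));           /* value set, mask clear */
        assert(!is_restore_inhibit(RESTORE_INHIBIT << 16));     /* mask set, value clear */
        return 0;
}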
index fa75a2eead9070fbfd1894b659b37f84eed9e11a..82586c8e434fa86062458ff58c3b25ebfdd263ca 100644 (file)
@@ -216,7 +216,6 @@ static void virt_vbt_generation(struct vbt *v)
 /**
  * intel_vgpu_init_opregion - initialize the stuff used to emulate opregion
  * @vgpu: a vGPU
- * @gpa: guest physical address of opregion
  *
  * Returns:
  * Zero on success, negative error code if failed.
index 256d0db8bbb1553f3539925dc8ebab698a6070ef..84856022528ee6fb79747e002a335e8201a8fa39 100644 (file)
@@ -41,6 +41,8 @@ struct intel_vgpu_page_track *intel_vgpu_find_page_track(
  * intel_vgpu_register_page_track - register a guest page to be tracked
  * @vgpu: a vGPU
  * @gfn: the gfn of guest page
+ * @handler: page track handler
+ * @priv: tracker private
  *
  * Returns:
  * zero on success, negative error code if failed.
index d4f7ce6dc1d738f0e31bead53e6d6cf45beb7cf4..428d252344f1e4027bc5ffecb12bf7842a1dbe09 100644 (file)
 #define _RING_CTL_BUF_SIZE(ctl) (((ctl) & RB_TAIL_SIZE_MASK) + \
                I915_GTT_PAGE_SIZE)
 
+#define PCH_GPIO_BASE  _MMIO(0xc5010)
+
+#define PCH_GMBUS0     _MMIO(0xc5100)
+#define PCH_GMBUS1     _MMIO(0xc5104)
+#define PCH_GMBUS2     _MMIO(0xc5108)
+#define PCH_GMBUS3     _MMIO(0xc510c)
+#define PCH_GMBUS4     _MMIO(0xc5110)
+#define PCH_GMBUS5     _MMIO(0xc5120)
+
+#define TRVATTL3PTRDW(i)       _MMIO(0x4de0 + (i) * 4)
+#define TRNULLDETCT            _MMIO(0x4de8)
+#define TRINVTILEDETCT         _MMIO(0x4dec)
+#define TRVADR                 _MMIO(0x4df0)
+#define TRTTE                  _MMIO(0x4df4)
+#define RING_EXCC(base)                _MMIO((base) + 0x28)
+#define RING_GFX_MODE(base)    _MMIO((base) + 0x29c)
+#define VF_GUARDBAND           _MMIO(0x83a4)
+
 #endif
index 43aa058e29fca92368c55aa3b458393951de72e8..ea34003d6dd251e26ac8fad62607d043374e9b71 100644 (file)
@@ -132,35 +132,6 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
        unsigned long context_gpa, context_page_num;
        int i;
 
-       gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
-                       workload->ctx_desc.lrca);
-
-       context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
-
-       context_page_num = context_page_num >> PAGE_SHIFT;
-
-       if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
-               context_page_num = 19;
-
-       i = 2;
-
-       while (i < context_page_num) {
-               context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
-                               (u32)((workload->ctx_desc.lrca + i) <<
-                               I915_GTT_PAGE_SHIFT));
-               if (context_gpa == INTEL_GVT_INVALID_ADDR) {
-                       gvt_vgpu_err("Invalid guest context descriptor\n");
-                       return -EFAULT;
-               }
-
-               page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
-               dst = kmap(page);
-               intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
-                               I915_GTT_PAGE_SIZE);
-               kunmap(page);
-               i++;
-       }
-
        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
        shadow_ring_context = kmap(page);
 
@@ -195,6 +166,37 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 
        sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
        kunmap(page);
+
+       if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val))
+               return 0;
+
+       gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
+                       workload->ctx_desc.lrca);
+
+       context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
+
+       context_page_num = context_page_num >> PAGE_SHIFT;
+
+       if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+               context_page_num = 19;
+
+       i = 2;
+       while (i < context_page_num) {
+               context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+                               (u32)((workload->ctx_desc.lrca + i) <<
+                               I915_GTT_PAGE_SHIFT));
+               if (context_gpa == INTEL_GVT_INVALID_ADDR) {
+                       gvt_vgpu_err("Invalid guest context descriptor\n");
+                       return -EFAULT;
+               }
+
+               page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
+               dst = kmap(page);
+               intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
+                               I915_GTT_PAGE_SIZE);
+               kunmap(page);
+               i++;
+       }
        return 0;
 }
 
@@ -1138,6 +1140,7 @@ out_shadow_ctx:
 /**
  * intel_vgpu_select_submission_ops - select virtual submission interface
  * @vgpu: a vGPU
+ * @engine_mask: either ALL_ENGINES or target engine mask
  * @interface: expected vGPU virtual submission interface
  *
  * This function is called when guest configures submission interface.
@@ -1190,7 +1193,7 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
 
 /**
  * intel_vgpu_destroy_workload - destroy a vGPU workload
- * @vgpu: a vGPU
+ * @workload: workload to destroy
  *
  * This function is called when destroying a vGPU workload.
  *
@@ -1282,6 +1285,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
 /**
  * intel_vgpu_create_workload - create a vGPU workload
  * @vgpu: a vGPU
+ * @ring_id: ring index
  * @desc: a guest context descriptor
  *
  * This function is called when creating a vGPU workload.
index f9ce35da4123ec52657f55f6a704c12c9c286080..1f7051e97afb16db9000000df4b92250f6a9de27 100644 (file)
@@ -1953,7 +1953,10 @@ static int i915_context_status(struct seq_file *m, void *unused)
                return ret;
 
        list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
-               seq_printf(m, "HW context %u ", ctx->hw_id);
+               seq_puts(m, "HW context ");
+               if (!list_empty(&ctx->hw_id_link))
+                       seq_printf(m, "%x [pin %u]", ctx->hw_id,
+                                  atomic_read(&ctx->hw_id_pin_count));
                if (ctx->pid) {
                        struct task_struct *task;
 
@@ -2708,7 +2711,9 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
        intel_runtime_pm_get(dev_priv);
 
        mutex_lock(&dev_priv->psr.lock);
-       seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
+       seq_printf(m, "PSR mode: %s\n",
+                  dev_priv->psr.psr2_enabled ? "PSR2" : "PSR1");
+       seq_printf(m, "Enabled: %s\n", yesno(dev_priv->psr.enabled));
        seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
                   dev_priv->psr.busy_frontbuffer_bits);
 
@@ -2735,7 +2740,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
        psr_source_status(dev_priv, m);
        mutex_unlock(&dev_priv->psr.lock);
 
-       if (READ_ONCE(dev_priv->psr.debug)) {
+       if (READ_ONCE(dev_priv->psr.debug) & I915_PSR_DEBUG_IRQ) {
                seq_printf(m, "Last attempted entry at: %lld\n",
                           dev_priv->psr.last_entry_attempt);
                seq_printf(m, "Last exit at: %lld\n",
@@ -2750,17 +2755,32 @@ static int
 i915_edp_psr_debug_set(void *data, u64 val)
 {
        struct drm_i915_private *dev_priv = data;
+       struct drm_modeset_acquire_ctx ctx;
+       int ret;
 
        if (!CAN_PSR(dev_priv))
                return -ENODEV;
 
-       DRM_DEBUG_KMS("PSR debug %s\n", enableddisabled(val));
+       DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
 
        intel_runtime_pm_get(dev_priv);
-       intel_psr_irq_control(dev_priv, !!val);
+
+       drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
+
+retry:
+       ret = intel_psr_set_debugfs_mode(dev_priv, &ctx, val);
+       if (ret == -EDEADLK) {
+               ret = drm_modeset_backoff(&ctx);
+               if (!ret)
+                       goto retry;
+       }
+
+       drm_modeset_drop_locks(&ctx);
+       drm_modeset_acquire_fini(&ctx);
+
        intel_runtime_pm_put(dev_priv);
 
-       return 0;
+       return ret;
 }
 
 static int
@@ -2845,10 +2865,10 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
                enum intel_display_power_domain power_domain;
 
                power_well = &power_domains->power_wells[i];
-               seq_printf(m, "%-25s %d\n", power_well->name,
+               seq_printf(m, "%-25s %d\n", power_well->desc->name,
                           power_well->count);
 
-               for_each_power_domain(power_domain, power_well->domains)
+               for_each_power_domain(power_domain, power_well->desc->domains)
                        seq_printf(m, "  %-23s %d\n",
                                 intel_display_power_domain_str(power_domain),
                                 power_domains->domain_use_count[power_domain]);
@@ -4114,13 +4134,17 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
 #define DROP_FREED     BIT(4)
 #define DROP_SHRINK_ALL        BIT(5)
 #define DROP_IDLE      BIT(6)
+#define DROP_RESET_ACTIVE      BIT(7)
+#define DROP_RESET_SEQNO       BIT(8)
 #define DROP_ALL (DROP_UNBOUND | \
                  DROP_BOUND    | \
                  DROP_RETIRE   | \
                  DROP_ACTIVE   | \
                  DROP_FREED    | \
                  DROP_SHRINK_ALL |\
-                 DROP_IDLE)
+                 DROP_IDLE     | \
+                 DROP_RESET_ACTIVE | \
+                 DROP_RESET_SEQNO)
 static int
 i915_drop_caches_get(void *data, u64 *val)
 {
@@ -4132,53 +4156,69 @@ i915_drop_caches_get(void *data, u64 *val)
 static int
 i915_drop_caches_set(void *data, u64 val)
 {
-       struct drm_i915_private *dev_priv = data;
-       struct drm_device *dev = &dev_priv->drm;
+       struct drm_i915_private *i915 = data;
        int ret = 0;
 
        DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
                  val, val & DROP_ALL);
 
+       if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915))
+               i915_gem_set_wedged(i915);
+
        /* No need to check and wait for gpu resets, only libdrm auto-restarts
         * on ioctls on -EAGAIN. */
-       if (val & (DROP_ACTIVE | DROP_RETIRE)) {
-               ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
+               ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
                if (ret)
                        return ret;
 
                if (val & DROP_ACTIVE)
-                       ret = i915_gem_wait_for_idle(dev_priv,
+                       ret = i915_gem_wait_for_idle(i915,
                                                     I915_WAIT_INTERRUPTIBLE |
                                                     I915_WAIT_LOCKED,
                                                     MAX_SCHEDULE_TIMEOUT);
 
+               if (val & DROP_RESET_SEQNO) {
+                       intel_runtime_pm_get(i915);
+                       ret = i915_gem_set_global_seqno(&i915->drm, 1);
+                       intel_runtime_pm_put(i915);
+               }
+
                if (val & DROP_RETIRE)
-                       i915_retire_requests(dev_priv);
+                       i915_retire_requests(i915);
 
-               mutex_unlock(&dev->struct_mutex);
+               mutex_unlock(&i915->drm.struct_mutex);
+       }
+
+       if (val & DROP_RESET_ACTIVE &&
+           i915_terminally_wedged(&i915->gpu_error)) {
+               i915_handle_error(i915, ALL_ENGINES, 0, NULL);
+               wait_on_bit(&i915->gpu_error.flags,
+                           I915_RESET_HANDOFF,
+                           TASK_UNINTERRUPTIBLE);
        }
 
        fs_reclaim_acquire(GFP_KERNEL);
        if (val & DROP_BOUND)
-               i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_BOUND);
+               i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
 
        if (val & DROP_UNBOUND)
-               i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
+               i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
 
        if (val & DROP_SHRINK_ALL)
-               i915_gem_shrink_all(dev_priv);
+               i915_gem_shrink_all(i915);
        fs_reclaim_release(GFP_KERNEL);
 
        if (val & DROP_IDLE) {
                do {
-                       if (READ_ONCE(dev_priv->gt.active_requests))
-                               flush_delayed_work(&dev_priv->gt.retire_work);
-                       drain_delayed_work(&dev_priv->gt.idle_work);
-               } while (READ_ONCE(dev_priv->gt.awake));
+                       if (READ_ONCE(i915->gt.active_requests))
+                               flush_delayed_work(&i915->gt.retire_work);
+                       drain_delayed_work(&i915->gt.idle_work);
+               } while (READ_ONCE(i915->gt.awake));
        }
 
        if (val & DROP_FREED)
-               i915_gem_drain_freed_objects(dev_priv);
+               i915_gem_drain_freed_objects(i915);
 
        return ret;
 }
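
The new DROP_RESET_ACTIVE/DROP_RESET_SEQNO bits are driven from userspace
(mostly IGT) through the drop-caches debugfs node. A hedged sketch, assuming
the usual i915_gem_drop_caches node name and card 0; the bit values mirror
the hunk above:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define DROP_RESET_ACTIVE       (1u << 7)
#define DROP_RESET_SEQNO        (1u << 8)

int main(void)
{
        int fd = open("/sys/kernel/debug/dri/0/i915_gem_drop_caches", O_WRONLY);

        if (fd < 0) {
                perror("open");         /* needs root and an i915 device */
                return 1;
        }
        dprintf(fd, "0x%x", DROP_RESET_ACTIVE | DROP_RESET_SEQNO);
        close(fd);
        return 0;
}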
index f8cfd16be534cf3eece97c4a59a456e5d8769bde..5dd7fc582e6fd9d4975f9ed2f5968d247d27e892 100644 (file)
@@ -373,7 +373,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
                        value = 2;
                break;
        case I915_PARAM_HAS_RESOURCE_STREAMER:
-               value = HAS_RESOURCE_STREAMER(dev_priv);
+               value = 0;
                break;
        case I915_PARAM_HAS_POOLED_EU:
                value = HAS_POOLED_EU(dev_priv);
@@ -441,6 +441,9 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
        case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
                value = 1000 * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz;
                break;
+       case I915_PARAM_MMAP_GTT_COHERENT:
+               value = INTEL_INFO(dev_priv)->has_coherent_ggtt;
+               break;
        default:
                DRM_DEBUG("Unknown parameter %d\n", param->param);
                return -EINVAL;
@@ -709,7 +712,7 @@ cleanup_irq:
        intel_teardown_gmbus(dev_priv);
 cleanup_csr:
        intel_csr_ucode_fini(dev_priv);
-       intel_power_domains_fini(dev_priv);
+       intel_power_domains_fini_hw(dev_priv);
        vga_switcheroo_unregister_client(pdev);
 cleanup_vga_client:
        vga_client_register(pdev, NULL, NULL, NULL);
@@ -867,7 +870,6 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
 /**
  * i915_driver_init_early - setup state not requiring device access
  * @dev_priv: device private
- * @ent: the matching pci_device_id
  *
  * Initialize everything that is a "SW-only" state, that is state not
  * requiring accessing the device or exposing the driver via kernel internal
@@ -875,25 +877,13 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
  * system memory allocation, setting up device specific attributes and
  * function hooks not requiring accessing the device.
  */
-static int i915_driver_init_early(struct drm_i915_private *dev_priv,
-                                 const struct pci_device_id *ent)
+static int i915_driver_init_early(struct drm_i915_private *dev_priv)
 {
-       const struct intel_device_info *match_info =
-               (struct intel_device_info *)ent->driver_data;
-       struct intel_device_info *device_info;
        int ret = 0;
 
        if (i915_inject_load_failure())
                return -ENODEV;
 
-       /* Setup the write-once "constant" device info */
-       device_info = mkwrite_device_info(dev_priv);
-       memcpy(device_info, match_info, sizeof(*device_info));
-       device_info->device_id = dev_priv->drm.pdev->device;
-
-       BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
-                    sizeof(device_info->platform_mask) * BITS_PER_BYTE);
-       BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
        spin_lock_init(&dev_priv->irq_lock);
        spin_lock_init(&dev_priv->gpu_error.lock);
        mutex_init(&dev_priv->backlight_lock);
@@ -921,7 +911,9 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
        intel_uc_init_early(dev_priv);
        intel_pm_setup(dev_priv);
        intel_init_dpio(dev_priv);
-       intel_power_domains_init(dev_priv);
+       ret = intel_power_domains_init(dev_priv);
+       if (ret < 0)
+               goto err_uc;
        intel_irq_init(dev_priv);
        intel_hangcheck_init(dev_priv);
        intel_init_display_hooks(dev_priv);
@@ -933,6 +925,9 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
 
        return 0;
 
+err_uc:
+       intel_uc_cleanup_early(dev_priv);
+       i915_gem_cleanup_early(dev_priv);
 err_workqueues:
        i915_workqueues_cleanup(dev_priv);
 err_engines:
@@ -947,6 +942,7 @@ err_engines:
 static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
 {
        intel_irq_fini(dev_priv);
+       intel_power_domains_cleanup(dev_priv);
        intel_uc_cleanup_early(dev_priv);
        i915_gem_cleanup_early(dev_priv);
        i915_workqueues_cleanup(dev_priv);
@@ -1272,6 +1268,9 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
         */
        if (INTEL_INFO(dev_priv)->num_pipes)
                drm_kms_helper_poll_init(dev);
+
+       intel_power_domains_enable(dev_priv);
+       intel_runtime_pm_enable(dev_priv);
 }
 
 /**
@@ -1280,6 +1279,9 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
  */
 static void i915_driver_unregister(struct drm_i915_private *dev_priv)
 {
+       intel_runtime_pm_disable(dev_priv);
+       intel_power_domains_disable(dev_priv);
+
        intel_fbdev_unregister(dev_priv);
        intel_audio_deinit(dev_priv);
 
@@ -1316,6 +1318,52 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
                DRM_INFO("DRM_I915_DEBUG enabled\n");
        if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
                DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
+       if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
+               DRM_INFO("DRM_I915_DEBUG_RUNTIME_PM enabled\n");
+}
+
+static struct drm_i915_private *
+i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       const struct intel_device_info *match_info =
+               (struct intel_device_info *)ent->driver_data;
+       struct intel_device_info *device_info;
+       struct drm_i915_private *i915;
+
+       i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
+       if (!i915)
+               return NULL;
+
+       if (drm_dev_init(&i915->drm, &driver, &pdev->dev)) {
+               kfree(i915);
+               return NULL;
+       }
+
+       i915->drm.pdev = pdev;
+       i915->drm.dev_private = i915;
+       pci_set_drvdata(pdev, &i915->drm);
+
+       /* Setup the write-once "constant" device info */
+       device_info = mkwrite_device_info(i915);
+       memcpy(device_info, match_info, sizeof(*device_info));
+       device_info->device_id = pdev->device;
+
+       BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
+                    sizeof(device_info->platform_mask) * BITS_PER_BYTE);
+       BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
+
+       return i915;
+}
+
+static void i915_driver_destroy(struct drm_i915_private *i915)
+{
+       struct pci_dev *pdev = i915->drm.pdev;
+
+       drm_dev_fini(&i915->drm);
+       kfree(i915);
+
+       /* And make sure we never chase our dangling pointer from pci_dev */
+       pci_set_drvdata(pdev, NULL);
 }
 
 /**
@@ -1340,38 +1388,19 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
                driver.driver_features &= ~DRIVER_ATOMIC;
 
-       ret = -ENOMEM;
-       dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
-       if (dev_priv)
-               ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev);
-       if (ret) {
-               DRM_DEV_ERROR(&pdev->dev, "allocation failed\n");
-               goto out_free;
-       }
-
-       dev_priv->drm.pdev = pdev;
-       dev_priv->drm.dev_private = dev_priv;
+       dev_priv = i915_driver_create(pdev, ent);
+       if (!dev_priv)
+               return -ENOMEM;
 
        ret = pci_enable_device(pdev);
        if (ret)
                goto out_fini;
 
-       pci_set_drvdata(pdev, &dev_priv->drm);
-       /*
-        * Disable the system suspend direct complete optimization, which can
-        * leave the device suspended skipping the driver's suspend handlers
-        * if the device was already runtime suspended. This is needed due to
-        * the difference in our runtime and system suspend sequence and
-        * becaue the HDA driver may require us to enable the audio power
-        * domain during system suspend.
-        */
-       dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
-
-       ret = i915_driver_init_early(dev_priv, ent);
+       ret = i915_driver_init_early(dev_priv);
        if (ret < 0)
                goto out_pci_disable;
 
-       intel_runtime_pm_get(dev_priv);
+       disable_rpm_wakeref_asserts(dev_priv);
 
        ret = i915_driver_init_mmio(dev_priv);
        if (ret < 0)
@@ -1399,11 +1428,9 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        i915_driver_register(dev_priv);
 
-       intel_runtime_pm_enable(dev_priv);
-
        intel_init_ipc(dev_priv);
 
-       intel_runtime_pm_put(dev_priv);
+       enable_rpm_wakeref_asserts(dev_priv);
 
        i915_welcome_messages(dev_priv);
 
@@ -1414,16 +1441,13 @@ out_cleanup_hw:
 out_cleanup_mmio:
        i915_driver_cleanup_mmio(dev_priv);
 out_runtime_pm_put:
-       intel_runtime_pm_put(dev_priv);
+       enable_rpm_wakeref_asserts(dev_priv);
        i915_driver_cleanup_early(dev_priv);
 out_pci_disable:
        pci_disable_device(pdev);
 out_fini:
        i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
-       drm_dev_fini(&dev_priv->drm);
-out_free:
-       kfree(dev_priv);
-       pci_set_drvdata(pdev, NULL);
+       i915_driver_destroy(dev_priv);
        return ret;
 }
 
@@ -1432,13 +1456,13 @@ void i915_driver_unload(struct drm_device *dev)
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct pci_dev *pdev = dev_priv->drm.pdev;
 
+       disable_rpm_wakeref_asserts(dev_priv);
+
        i915_driver_unregister(dev_priv);
 
        if (i915_gem_suspend(dev_priv))
                DRM_ERROR("failed to idle hardware; continuing to unload!\n");
 
-       intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
-
        drm_atomic_helper_shutdown(dev);
 
        intel_gvt_cleanup(dev_priv);
@@ -1459,12 +1483,14 @@ void i915_driver_unload(struct drm_device *dev)
        i915_gem_fini(dev_priv);
        intel_fbc_cleanup_cfb(dev_priv);
 
-       intel_power_domains_fini(dev_priv);
+       intel_power_domains_fini_hw(dev_priv);
 
        i915_driver_cleanup_hw(dev_priv);
        i915_driver_cleanup_mmio(dev_priv);
 
-       intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+       enable_rpm_wakeref_asserts(dev_priv);
+
+       WARN_ON(atomic_read(&dev_priv->runtime_pm.wakeref_count));
 }
 
 static void i915_driver_release(struct drm_device *dev)
@@ -1472,9 +1498,7 @@ static void i915_driver_release(struct drm_device *dev)
        struct drm_i915_private *dev_priv = to_i915(dev);
 
        i915_driver_cleanup_early(dev_priv);
-       drm_dev_fini(&dev_priv->drm);
-
-       kfree(dev_priv);
+       i915_driver_destroy(dev_priv);
 }
 
 static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
@@ -1573,7 +1597,7 @@ static int i915_drm_suspend(struct drm_device *dev)
 
        /* We do a lot of poking in a lot of registers, make sure they work
         * properly. */
-       intel_display_set_init_power(dev_priv, true);
+       intel_power_domains_disable(dev_priv);
 
        drm_kms_helper_poll_disable(dev);
 
@@ -1610,6 +1634,18 @@ static int i915_drm_suspend(struct drm_device *dev)
        return 0;
 }
 
+static enum i915_drm_suspend_mode
+get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate)
+{
+       if (hibernate)
+               return I915_DRM_SUSPEND_HIBERNATE;
+
+       if (suspend_to_idle(dev_priv))
+               return I915_DRM_SUSPEND_IDLE;
+
+       return I915_DRM_SUSPEND_MEM;
+}
+
 static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1620,21 +1656,10 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 
        i915_gem_suspend_late(dev_priv);
 
-       intel_display_set_init_power(dev_priv, false);
        intel_uncore_suspend(dev_priv);
 
-       /*
-        * In case of firmware assisted context save/restore don't manually
-        * deinit the power domains. This also means the CSR/DMC firmware will
-        * stay active, it will power down any HW resources as required and
-        * also enable deeper system power states that would be blocked if the
-        * firmware was inactive.
-        */
-       if (IS_GEN9_LP(dev_priv) || hibernation || !suspend_to_idle(dev_priv) ||
-           dev_priv->csr.dmc_payload == NULL) {
-               intel_power_domains_suspend(dev_priv);
-               dev_priv->power_domains_suspended = true;
-       }
+       intel_power_domains_suspend(dev_priv,
+                                   get_suspend_mode(dev_priv, hibernation));
 
        ret = 0;
        if (IS_GEN9_LP(dev_priv))
@@ -1646,10 +1671,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 
        if (ret) {
                DRM_ERROR("Suspend complete failed: %d\n", ret);
-               if (dev_priv->power_domains_suspended) {
-                       intel_power_domains_init_hw(dev_priv, true);
-                       dev_priv->power_domains_suspended = false;
-               }
+               intel_power_domains_resume(dev_priv);
 
                goto out;
        }
@@ -1755,7 +1777,7 @@ static int i915_drm_resume(struct drm_device *dev)
        /*
         * ... but also need to make sure that hotplug processing
         * doesn't cause havoc. Like in the driver load code we don't
-        * bother with the tiny race here where we might loose hotplug
+        * bother with the tiny race here where we might lose hotplug
         * notifications.
         * */
        intel_hpd_init(dev_priv);
@@ -1766,6 +1788,8 @@ static int i915_drm_resume(struct drm_device *dev)
 
        intel_opregion_notify_adapter(dev_priv, PCI_D0);
 
+       intel_power_domains_enable(dev_priv);
+
        enable_rpm_wakeref_asserts(dev_priv);
 
        return 0;
@@ -1800,7 +1824,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
        ret = pci_set_power_state(pdev, PCI_D0);
        if (ret) {
                DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
-               goto out;
+               return ret;
        }
 
        /*
@@ -1816,10 +1840,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
         * depend on the device enable refcount we can't anyway depend on them
         * disabling/enabling the device.
         */
-       if (pci_enable_device(pdev)) {
-               ret = -EIO;
-               goto out;
-       }
+       if (pci_enable_device(pdev))
+               return -EIO;
 
        pci_set_master(pdev);
 
@@ -1842,18 +1864,12 @@ static int i915_drm_resume_early(struct drm_device *dev)
 
        intel_uncore_sanitize(dev_priv);
 
-       if (dev_priv->power_domains_suspended)
-               intel_power_domains_init_hw(dev_priv, true);
-       else
-               intel_display_set_init_power(dev_priv, true);
+       intel_power_domains_resume(dev_priv);
 
        intel_engines_sanitize(dev_priv);
 
        enable_rpm_wakeref_asserts(dev_priv);
 
-out:
-       dev_priv->power_domains_suspended = false;
-
        return ret;
 }
 
@@ -1915,7 +1931,6 @@ void i915_reset(struct drm_i915_private *i915,
                dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
        error->reset_count++;
 
-       disable_irq(i915->drm.irq);
        ret = i915_gem_reset_prepare(i915);
        if (ret) {
                dev_err(i915->drm.dev, "GPU recovery failed\n");
@@ -1977,8 +1992,6 @@ void i915_reset(struct drm_i915_private *i915,
 
 finish:
        i915_gem_reset_finish(i915);
-       enable_irq(i915->drm.irq);
-
 wakeup:
        clear_bit(I915_RESET_HANDOFF, &error->flags);
        wake_up_bit(&error->flags, I915_RESET_HANDOFF);
@@ -2073,6 +2086,7 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
                goto out;
 
 out:
+       intel_engine_cancel_stop_cs(engine);
        i915_gem_reset_finish_engine(engine);
        return ret;
 }
index 4aca5344863d6fc013470b41a706c4e7bd18d567..2ccb982a5dba8faf55ffd8b9ca9010b4d3854c9b 100644 (file)
@@ -86,8 +86,8 @@
 
 #define DRIVER_NAME            "i915"
 #define DRIVER_DESC            "Intel Graphics"
-#define DRIVER_DATE            "20180719"
-#define DRIVER_TIMESTAMP       1532015279
+#define DRIVER_DATE            "20180906"
+#define DRIVER_TIMESTAMP       1536242083
 
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -611,8 +611,18 @@ struct i915_drrs {
 
 struct i915_psr {
        struct mutex lock;
+
+#define I915_PSR_DEBUG_MODE_MASK       0x0f
+#define I915_PSR_DEBUG_DEFAULT         0x00
+#define I915_PSR_DEBUG_DISABLE         0x01
+#define I915_PSR_DEBUG_ENABLE          0x02
+#define I915_PSR_DEBUG_FORCE_PSR1      0x03
+#define I915_PSR_DEBUG_IRQ             0x10
+
+       u32 debug;
        bool sink_support;
-       struct intel_dp *enabled;
+       bool prepared, enabled;
+       struct intel_dp *dp;
        bool active;
        struct work_struct work;
        unsigned busy_frontbuffer_bits;
@@ -622,7 +632,6 @@ struct i915_psr {
        bool alpm;
        bool psr2_enabled;
        u8 sink_sync_latency;
-       bool debug;
        ktime_t last_entry_attempt;
        ktime_t last_exit;
 };
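
The reworked debug word packs a forced PSR mode into the low nibble and an
IRQ-tracing flag into bit 4, replacing the old bool. A small sketch of
decoding it, with the values copied from the hunk above:

#include <stdbool.h>
#include <stdint.h>

#define I915_PSR_DEBUG_MODE_MASK        0x0f    /* DEFAULT/DISABLE/ENABLE/FORCE_PSR1 */
#define I915_PSR_DEBUG_IRQ              0x10

static inline uint32_t psr_debug_mode(uint32_t debug)
{
        return debug & I915_PSR_DEBUG_MODE_MASK;
}

static inline bool psr_debug_irq(uint32_t debug)
{
        return debug & I915_PSR_DEBUG_IRQ;      /* also gates the entry/exit timestamps in debugfs */
}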
@@ -867,14 +876,17 @@ struct i915_power_well_ops {
                           struct i915_power_well *power_well);
 };
 
+struct i915_power_well_regs {
+       i915_reg_t bios;
+       i915_reg_t driver;
+       i915_reg_t kvmr;
+       i915_reg_t debug;
+};
+
 /* Power well structure for haswell */
-struct i915_power_well {
+struct i915_power_well_desc {
        const char *name;
        bool always_on;
-       /* power well enable/disable usage count */
-       int count;
-       /* cached hw enabled state */
-       bool hw_enabled;
        u64 domains;
        /* unique identifier for this power well */
        enum i915_power_well_id id;
@@ -883,10 +895,23 @@ struct i915_power_well {
         * well specific.
         */
        union {
+               struct {
+                       /*
+                        * request/status flag index in the PUNIT power well
+                        * control/status registers.
+                        */
+                       u8 idx;
+               } vlv;
                struct {
                        enum dpio_phy phy;
                } bxt;
                struct {
+                       const struct i915_power_well_regs *regs;
+                       /*
+                        * request/status flag index in the power well
+                        * control/status registers.
+                        */
+                       u8 idx;
                        /* Mask of pipes whose IRQ logic is backed by the pw */
                        u8 irq_pipe_mask;
                        /* The pw is backing the VGA functionality */
@@ -897,13 +922,21 @@ struct i915_power_well {
        const struct i915_power_well_ops *ops;
 };
 
+struct i915_power_well {
+       const struct i915_power_well_desc *desc;
+       /* power well enable/disable usage count */
+       int count;
+       /* cached hw enabled state */
+       bool hw_enabled;
+};
+
 struct i915_power_domains {
        /*
         * Power wells needed for initialization at driver init and suspend
         * time are on. They are kept on until after the first modeset.
         */
-       bool init_power_on;
        bool initializing;
+       bool display_core_suspended;
        int power_well_count;
 
        struct mutex lock;
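
The split above is a classic const-descriptor/mutable-state refactor: the
board description (name, domains, ops, register indices) becomes a const
table that can live in rodata and be shared, while each device keeps only a
thin mutable wrapper pointing back at its descriptor. A generic sketch of
the shape, with illustrative names rather than i915's:

#include <stdbool.h>
#include <stddef.h>

struct widget_desc {                    /* immutable, can be static const */
        const char *name;
        unsigned long long domains;
};

struct widget {                         /* per-device, mutable */
        const struct widget_desc *desc; /* never written at runtime */
        int count;                      /* usage count */
        bool hw_enabled;                /* cached hw state */
};

static const struct widget_desc widget_descs[] = {
        { .name = "always-on", .domains = ~0ULL },
};

static void widgets_init(struct widget *w, size_t n)
{
        for (size_t i = 0; i < n; i++)
                w[i].desc = &widget_descs[i];
}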
@@ -1610,7 +1643,8 @@ struct drm_i915_private {
        struct mutex gmbus_mutex;
 
        /**
-        * Base address of the gmbus and gpio block.
+        * Base address of where the gmbus and gpio blocks are located (either
+        * on PCH or on SoC for platforms without PCH).
         */
        uint32_t gpio_mmio_base;
 
@@ -1632,7 +1666,6 @@ struct drm_i915_private {
        struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
                                            [MAX_ENGINE_INSTANCE + 1];
 
-       struct drm_dma_handle *status_page_dmah;
        struct resource mch_res;
 
        /* protects the irq masks */
@@ -1828,6 +1861,7 @@ struct drm_i915_private {
        struct mutex av_mutex;
 
        struct {
+               struct mutex mutex;
                struct list_head list;
                struct llist_head free_list;
                struct work_struct free_work;
@@ -1840,6 +1874,7 @@ struct drm_i915_private {
 #define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
 #define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
 #define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
+               struct list_head hw_id_list;
        } contexts;
 
        u32 fdi_rx_config;
@@ -2610,8 +2645,6 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define USES_GUC_SUBMISSION(dev_priv)  intel_uc_is_using_guc_submission()
 #define USES_HUC(dev_priv)             intel_uc_is_using_huc()
 
-#define HAS_RESOURCE_STREAMER(dev_priv) ((dev_priv)->info.has_resource_streamer)
-
 #define HAS_POOLED_EU(dev_priv)        ((dev_priv)->info.has_pooled_eu)
 
 #define INTEL_PCH_DEVICE_ID_MASK               0xff80
@@ -2775,6 +2808,8 @@ extern void intel_irq_fini(struct drm_i915_private *dev_priv);
 int intel_irq_install(struct drm_i915_private *dev_priv);
 void intel_irq_uninstall(struct drm_i915_private *dev_priv);
 
+void i915_clear_error_registers(struct drm_i915_private *dev_priv);
+
 static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
 {
        return dev_priv->gvt;
index fcc73a6ab503e2f4ffd5b9a5967b36d6ba5f1958..89834ce19acda0fe9090bbb97733109c40ac2ce4 100644 (file)
@@ -802,6 +802,11 @@ void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
         * that was!).
         */
 
+       wmb();
+
+       if (INTEL_INFO(dev_priv)->has_coherent_ggtt)
+               return;
+
        i915_gem_chipset_flush(dev_priv);
 
        intel_runtime_pm_get(dev_priv);
@@ -1906,7 +1911,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
        return 0;
 }
 
-static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
+static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
 {
        return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
 }
@@ -1965,7 +1970,7 @@ int i915_gem_mmap_gtt_version(void)
 }
 
 static inline struct i915_ggtt_view
-compute_partial_view(struct drm_i915_gem_object *obj,
+compute_partial_view(const struct drm_i915_gem_object *obj,
                     pgoff_t page_offset,
                     unsigned int chunk)
 {
@@ -2013,7 +2018,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
+       bool write = area->vm_flags & VM_WRITE;
        struct i915_vma *vma;
        pgoff_t page_offset;
        int ret;
@@ -2528,13 +2533,21 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        gfp_t noreclaim;
        int ret;
 
-       /* Assert that the object is not currently in any GPU domain. As it
+       /*
+        * Assert that the object is not currently in any GPU domain. As it
         * wasn't in the GTT, there shouldn't be any way it could have been in
         * a GPU cache
         */
        GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
        GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
 
+       /*
+        * If there's no chance of allocating enough pages for the whole
+        * object, bail early.
+        */
+       if (page_count > totalram_pages)
+               return -ENOMEM;
+
        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (st == NULL)
                return -ENOMEM;
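
A note on the new early check: an object that needs more pages than the machine physically has can only fail after shmemfs drags the shrinker and reclaim through a pointless pass, so the request is refused immediately with -ENOMEM. A minimal userspace analogue of the same guard, assuming sysconf(3) is available; the function name is invented:

    #include <errno.h>
    #include <stdlib.h>
    #include <unistd.h>

    static void *alloc_pages_checked(size_t npages)
    {
            long total = sysconf(_SC_PHYS_PAGES);

            /* Hopeless request: more pages than physically exist. */
            if (total > 0 && npages > (size_t)total) {
                    errno = ENOMEM;
                    return NULL;
            }
            return calloc(npages, (size_t)sysconf(_SC_PAGE_SIZE));
    }
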
@@ -2545,7 +2558,8 @@ rebuild_st:
                return -ENOMEM;
        }
 
-       /* Get the list of pages out of our struct file.  They'll be pinned
+       /*
+        * Get the list of pages out of our struct file.  They'll be pinned
         * at this point until we release them.
         *
         * Fail silently without starting the shrinker
@@ -2577,7 +2591,8 @@ rebuild_st:
                        i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++);
                        cond_resched();
 
-                       /* We've tried hard to allocate the memory by reaping
+                       /*
+                        * We've tried hard to allocate the memory by reaping
                         * our own buffer, now let the real VM do its job and
                         * go down in flames if truly OOM.
                         *
@@ -2589,7 +2604,8 @@ rebuild_st:
                                /* reclaim and warn, but no oom */
                                gfp = mapping_gfp_mask(mapping);
 
-                               /* Our bo are always dirty and so we require
+                               /*
+                                * Our bo are always dirty and so we require
                                 * kswapd to reclaim our pages (direct reclaim
                                 * does not effectively begin pageout of our
                                 * buffers on its own). However, direct reclaim
@@ -2633,7 +2649,8 @@ rebuild_st:
 
        ret = i915_gem_gtt_prepare_pages(obj, st);
        if (ret) {
-               /* DMA remapping failed? One possible cause is that
+               /*
+                * DMA remapping failed? One possible cause is that
                 * it could not reserve enough large entries, asking
                 * for PAGE_SIZE chunks instead may be helpful.
                 */
@@ -2667,7 +2684,8 @@ err_pages:
        sg_free_table(st);
        kfree(st);
 
-       /* shmemfs first checks if there is enough memory to allocate the page
+       /*
+        * shmemfs first checks if there is enough memory to allocate the page
         * and reports ENOSPC should there be insufficient, along with the usual
         * ENOMEM for a genuine allocation failure.
         *
@@ -3307,8 +3325,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
                        intel_engine_dump(engine, &p, "%s\n", engine->name);
        }
 
-       set_bit(I915_WEDGED, &i915->gpu_error.flags);
-       smp_mb__after_atomic();
+       if (test_and_set_bit(I915_WEDGED, &i915->gpu_error.flags))
+               goto out;
 
        /*
         * First, stop submission to hw, but do not yet complete requests by
@@ -3324,7 +3342,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
        i915->caps.scheduler = 0;
 
        /* Even if the GPU reset fails, it should still stop the engines */
-       intel_gpu_reset(i915, ALL_ENGINES);
+       if (INTEL_GEN(i915) >= 5)
+               intel_gpu_reset(i915, ALL_ENGINES);
 
        /*
         * Make sure no one is running the old callback before we proceed with
@@ -3367,6 +3386,7 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
                i915_gem_reset_finish_engine(engine);
        }
 
+out:
        GEM_TRACE("end\n");
 
        wake_up_all(&i915->gpu_error.reset_queue);
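
Replacing set_bit() + smp_mb__after_atomic() with test_and_set_bit() makes i915_gem_set_wedged() idempotent: only the first caller walks through the teardown, while concurrent or repeated callers branch straight to the wakeup at the new "out" label. The same pattern with C11 atomics, as a hedged sketch (the flag and function names are invented):

    #include <stdatomic.h>

    #define WEDGED (1u << 0)

    static atomic_uint error_flags;

    static void set_wedged(void)
    {
            /* fetch_or returns the old value, so exactly one caller
             * sees the bit clear and performs the one-time teardown. */
            if (!(atomic_fetch_or(&error_flags, WEDGED) & WEDGED)) {
                    /* ... stop submission, cancel requests, reset ... */
            }

            /* every caller, first or not, still wakes any waiters */
    }
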
@@ -3816,6 +3836,12 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915,
                        if (timeout < 0)
                                return timeout;
                }
+               if (GEM_SHOW_DEBUG() && !timeout) {
+                       /* Presume that timeout was non-zero to begin with! */
+                       dev_warn(&i915->drm.pdev->dev,
+                                "Missed idle-completion interrupt!\n");
+                       GEM_TRACE_DUMP();
+               }
 
                err = wait_for_engines(i915);
                if (err)
@@ -5592,6 +5618,8 @@ err_uc_misc:
                i915_gem_cleanup_userptr(dev_priv);
 
        if (ret == -EIO) {
+               mutex_lock(&dev_priv->drm.struct_mutex);
+
                /*
                 * Allow engine initialisation to fail by marking the GPU as
                 * wedged. But we only want to do this where the GPU is angry,
@@ -5602,7 +5630,14 @@ err_uc_misc:
                                        "Failed to initialize GPU, declaring it wedged!\n");
                        i915_gem_set_wedged(dev_priv);
                }
-               ret = 0;
+
+               /* Minimal basic recovery for KMS */
+               ret = i915_ggtt_enable_hw(dev_priv);
+               i915_gem_restore_gtt_mappings(dev_priv);
+               i915_gem_restore_fences(dev_priv);
+               intel_init_clock_gating(dev_priv);
+
+               mutex_unlock(&dev_priv->drm.struct_mutex);
        }
 
        i915_gem_drain_freed_objects(dev_priv);
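
The -EIO branch above no longer pretends the load succeeded (the old "ret = 0"); instead, after wedging, it reruns a minimal bring-up (GGTT enable, GTT mappings, fences, clock gating) under struct_mutex, with the GGTT enable providing the return code, so modesetting stays usable on a dead GPU. The shape of that recovery as a sketch with stand-in helpers:

    #include <errno.h>

    static int hw_init(void)          { return -EIO; } /* stand-in */
    static void mark_wedged(void)     { }
    static int minimal_recovery(void) { return 0; }

    /* Demote a GPU init failure into a degraded-but-usable load. */
    static int subsystem_init(void)
    {
            int err = hw_init();

            if (err == -EIO) {
                    mark_wedged();            /* GPU stays unusable */
                    err = minimal_recovery(); /* keep display alive */
            }
            return err;
    }
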
@@ -5612,6 +5647,7 @@ err_uc_misc:
 void i915_gem_fini(struct drm_i915_private *dev_priv)
 {
        i915_gem_suspend_late(dev_priv);
+       intel_disable_gt_powersave(dev_priv);
 
        /* Flush any outstanding unpin_work. */
        i915_gem_drain_workqueue(dev_priv);
@@ -5623,6 +5659,8 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
        i915_gem_contexts_fini(dev_priv);
        mutex_unlock(&dev_priv->drm.struct_mutex);
 
+       intel_cleanup_gt_powersave(dev_priv);
+
        intel_uc_fini_misc(dev_priv);
        i915_gem_cleanup_userptr(dev_priv);
 
@@ -6182,4 +6220,5 @@ err_unlock:
 #include "selftests/huge_pages.c"
 #include "selftests/i915_gem_object.c"
 #include "selftests/i915_gem_coherency.c"
+#include "selftests/i915_gem.c"
 #endif
index e465929568726c31a39dfb6037da594c1a93f3ac..599c4f6eb1eab017f20ef7e06c4ce43aa817c42f 100644
@@ -82,12 +82,6 @@ static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
                tasklet_unlock_wait(t);
 }
 
-static inline void __tasklet_enable_sync_once(struct tasklet_struct *t)
-{
-       if (atomic_dec_return(&t->count) == 0)
-               tasklet_kill(t);
-}
-
 static inline bool __tasklet_is_enabled(const struct tasklet_struct *t)
 {
        return !atomic_read(&t->count);
index b10770cfccd24bedd80a7fd67ac06d78dde695c1..747b8170a15a0249d5bc25c965ccf840786910d8 100644
@@ -115,6 +115,95 @@ static void lut_close(struct i915_gem_context *ctx)
        rcu_read_unlock();
 }
 
+static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)
+{
+       unsigned int max;
+
+       lockdep_assert_held(&i915->contexts.mutex);
+
+       if (INTEL_GEN(i915) >= 11)
+               max = GEN11_MAX_CONTEXT_HW_ID;
+       else if (USES_GUC_SUBMISSION(i915))
+               /*
+                * When using GuC in proxy submission, GuC consumes the
+                * highest bit in the context id to indicate proxy submission.
+                */
+               max = MAX_GUC_CONTEXT_HW_ID;
+       else
+               max = MAX_CONTEXT_HW_ID;
+
+       return ida_simple_get(&i915->contexts.hw_ida, 0, max, gfp);
+}
+
+static int steal_hw_id(struct drm_i915_private *i915)
+{
+       struct i915_gem_context *ctx, *cn;
+       LIST_HEAD(pinned);
+       int id = -ENOSPC;
+
+       lockdep_assert_held(&i915->contexts.mutex);
+
+       list_for_each_entry_safe(ctx, cn,
+                                &i915->contexts.hw_id_list, hw_id_link) {
+               if (atomic_read(&ctx->hw_id_pin_count)) {
+                       list_move_tail(&ctx->hw_id_link, &pinned);
+                       continue;
+               }
+
+               GEM_BUG_ON(!ctx->hw_id); /* perma-pinned kernel context */
+               list_del_init(&ctx->hw_id_link);
+               id = ctx->hw_id;
+               break;
+       }
+
+       /*
+        * Remember how far we got on the last repossession scan, so the
+        * list is kept in a "least recently scanned" order.
+        */
+       list_splice_tail(&pinned, &i915->contexts.hw_id_list);
+       return id;
+}
+
+static int assign_hw_id(struct drm_i915_private *i915, unsigned int *out)
+{
+       int ret;
+
+       lockdep_assert_held(&i915->contexts.mutex);
+
+       /*
+        * We prefer to steal/stall ourselves and our users rather than stall
+        * the entire system. That may be a little unfair to our users, and
+        * even hurt high priority clients. The choice is whether to oomkill
+        * something else, or steal a context id.
+        */
+       ret = new_hw_id(i915, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+       if (unlikely(ret < 0)) {
+               ret = steal_hw_id(i915);
+               if (ret < 0) /* once again for the correct errno code */
+                       ret = new_hw_id(i915, GFP_KERNEL);
+               if (ret < 0)
+                       return ret;
+       }
+
+       *out = ret;
+       return 0;
+}
+
+static void release_hw_id(struct i915_gem_context *ctx)
+{
+       struct drm_i915_private *i915 = ctx->i915;
+
+       if (list_empty(&ctx->hw_id_link))
+               return;
+
+       mutex_lock(&i915->contexts.mutex);
+       if (!list_empty(&ctx->hw_id_link)) {
+               ida_simple_remove(&i915->contexts.hw_ida, ctx->hw_id);
+               list_del_init(&ctx->hw_id_link);
+       }
+       mutex_unlock(&i915->contexts.mutex);
+}
+
 static void i915_gem_context_free(struct i915_gem_context *ctx)
 {
        unsigned int n;
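
The new id allocator above is a three-step fallback: new_hw_id() first tries the ida with GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN, so a full pool fails quietly instead of warning or pushing towards the OOM killer; steal_hw_id() then repossesses an id from an idle (unpinned) context on hw_id_list, rotating scanned-but-pinned entries to the tail so the list stays in least-recently-scanned order; a final new_hw_id() with plain GFP_KERNEL runs only to produce the authoritative errno. The control flow over a toy fixed-size pool (every name here is a stand-in, not a driver symbol):

    #include <errno.h>
    #include <stdbool.h>

    #define MAX_IDS 8
    static bool id_used[MAX_IDS];

    static int try_new_id(void)
    {
            for (int i = 0; i < MAX_IDS; i++)
                    if (!id_used[i]) {
                            id_used[i] = true;
                            return i;
                    }
            return -ENOSPC;
    }

    /* Repossess an id from the first idle holder, if any. */
    static int steal_id(bool (*is_pinned)(int))
    {
            for (int i = 0; i < MAX_IDS; i++)
                    if (id_used[i] && !is_pinned(i))
                            return i;   /* id changes hands, stays used */
            return -ENOSPC;
    }

    static int assign_id(bool (*is_pinned)(int), unsigned int *out)
    {
            int ret = try_new_id();

            if (ret < 0) {
                    ret = steal_id(is_pinned);
                    if (ret < 0)
                            ret = try_new_id(); /* for the real errno */
                    if (ret < 0)
                            return ret;
            }
            *out = ret;
            return 0;
    }
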
@@ -122,6 +211,7 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
        lockdep_assert_held(&ctx->i915->drm.struct_mutex);
        GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
 
+       release_hw_id(ctx);
        i915_ppgtt_put(ctx->ppgtt);
 
        for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
@@ -136,7 +226,6 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
 
        list_del(&ctx->link);
 
-       ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id);
        kfree_rcu(ctx, rcu);
 }
 
@@ -190,6 +279,12 @@ static void context_close(struct i915_gem_context *ctx)
 {
        i915_gem_context_set_closed(ctx);
 
+       /*
+        * This context will never again be assigned to HW, so we can
+        * reuse its ID for the next context.
+        */
+       release_hw_id(ctx);
+
        /*
         * The LUT uses the VMA as a backpointer to unref the object,
         * so we need to clear the LUT before we close all the VMA (inside
@@ -203,43 +298,6 @@ static void context_close(struct i915_gem_context *ctx)
        i915_gem_context_put(ctx);
 }
 
-static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
-{
-       int ret;
-       unsigned int max;
-
-       if (INTEL_GEN(dev_priv) >= 11) {
-               max = GEN11_MAX_CONTEXT_HW_ID;
-       } else {
-               /*
-                * When using GuC in proxy submission, GuC consumes the
-                * highest bit in the context id to indicate proxy submission.
-                */
-               if (USES_GUC_SUBMISSION(dev_priv))
-                       max = MAX_GUC_CONTEXT_HW_ID;
-               else
-                       max = MAX_CONTEXT_HW_ID;
-       }
-
-
-       ret = ida_simple_get(&dev_priv->contexts.hw_ida,
-                            0, max, GFP_KERNEL);
-       if (ret < 0) {
-               /* Contexts are only released when no longer active.
-                * Flush any pending retires to hopefully release some
-                * stale contexts and try again.
-                */
-               i915_retire_requests(dev_priv);
-               ret = ida_simple_get(&dev_priv->contexts.hw_ida,
-                                    0, max, GFP_KERNEL);
-               if (ret < 0)
-                       return ret;
-       }
-
-       *out = ret;
-       return 0;
-}
-
 static u32 default_desc_template(const struct drm_i915_private *i915,
                                 const struct i915_hw_ppgtt *ppgtt)
 {
@@ -276,12 +334,6 @@ __create_hw_context(struct drm_i915_private *dev_priv,
        if (ctx == NULL)
                return ERR_PTR(-ENOMEM);
 
-       ret = assign_hw_id(dev_priv, &ctx->hw_id);
-       if (ret) {
-               kfree(ctx);
-               return ERR_PTR(ret);
-       }
-
        kref_init(&ctx->ref);
        list_add_tail(&ctx->link, &dev_priv->contexts.list);
        ctx->i915 = dev_priv;
@@ -295,6 +347,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 
        INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
        INIT_LIST_HEAD(&ctx->handles_list);
+       INIT_LIST_HEAD(&ctx->hw_id_link);
 
        /* Default context will never have a file_priv */
        ret = DEFAULT_CONTEXT_HANDLE;
@@ -329,16 +382,6 @@ __create_hw_context(struct drm_i915_private *dev_priv,
        ctx->desc_template =
                default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);
 
-       /*
-        * GuC requires the ring to be placed in Non-WOPCM memory. If GuC is not
-        * present or not in use we still need a small bias as ring wraparound
-        * at offset 0 sometimes hangs. No idea why.
-        */
-       if (USES_GUC(dev_priv))
-               ctx->ggtt_offset_bias = dev_priv->guc.ggtt_pin_bias;
-       else
-               ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;
-
        return ctx;
 
 err_pid:
@@ -431,15 +474,35 @@ out:
        return ctx;
 }
 
+static void
+destroy_kernel_context(struct i915_gem_context **ctxp)
+{
+       struct i915_gem_context *ctx;
+
+       /* Keep the context ref so that we can free it immediately ourselves */
+       ctx = i915_gem_context_get(fetch_and_zero(ctxp));
+       GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
+
+       context_close(ctx);
+       i915_gem_context_free(ctx);
+}
+
 struct i915_gem_context *
 i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
 {
        struct i915_gem_context *ctx;
+       int err;
 
        ctx = i915_gem_create_context(i915, NULL);
        if (IS_ERR(ctx))
                return ctx;
 
+       err = i915_gem_context_pin_hw_id(ctx);
+       if (err) {
+               destroy_kernel_context(&ctx);
+               return ERR_PTR(err);
+       }
+
        i915_gem_context_clear_bannable(ctx);
        ctx->sched.priority = prio;
        ctx->ring_size = PAGE_SIZE;
@@ -449,17 +512,19 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
        return ctx;
 }
 
-static void
-destroy_kernel_context(struct i915_gem_context **ctxp)
+static void init_contexts(struct drm_i915_private *i915)
 {
-       struct i915_gem_context *ctx;
+       mutex_init(&i915->contexts.mutex);
+       INIT_LIST_HEAD(&i915->contexts.list);
 
-       /* Keep the context ref so that we can free it immediately ourselves */
-       ctx = i915_gem_context_get(fetch_and_zero(ctxp));
-       GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
+       /* Using the simple ida interface, the max is limited by sizeof(int) */
+       BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
+       BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
+       ida_init(&i915->contexts.hw_ida);
+       INIT_LIST_HEAD(&i915->contexts.hw_id_list);
 
-       context_close(ctx);
-       i915_gem_context_free(ctx);
+       INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
+       init_llist_head(&i915->contexts.free_list);
 }
 
 static bool needs_preempt_context(struct drm_i915_private *i915)
@@ -480,14 +545,7 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
        if (ret)
                return ret;
 
-       INIT_LIST_HEAD(&dev_priv->contexts.list);
-       INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker);
-       init_llist_head(&dev_priv->contexts.free_list);
-
-       /* Using the simple ida interface, the max is limited by sizeof(int) */
-       BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
-       BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
-       ida_init(&dev_priv->contexts.hw_ida);
+       init_contexts(dev_priv);
 
        /* lowest priority; idle task */
        ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
@@ -497,9 +555,13 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
        }
        /*
         * For easy recognisability, we want the kernel context to be 0 and then
-        * all user contexts will have non-zero hw_id.
+        * all user contexts will have non-zero hw_id. Kernel contexts are
+        * permanently pinned, so that we never suffer a stall and can
+        * use them from any allocation context (e.g. for evicting other
+        * contexts and from inside the shrinker).
         */
        GEM_BUG_ON(ctx->hw_id);
+       GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
        dev_priv->kernel_context = ctx;
 
        /* highest priority; preempting task */
@@ -537,6 +599,7 @@ void i915_gem_contexts_fini(struct drm_i915_private *i915)
        destroy_kernel_context(&i915->kernel_context);
 
        /* Must free all deferred contexts (via flush_workqueue) first */
+       GEM_BUG_ON(!list_empty(&i915->contexts.hw_id_list));
        ida_destroy(&i915->contexts.hw_ida);
 }
 
@@ -942,6 +1005,33 @@ out:
        return ret;
 }
 
+int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
+{
+       struct drm_i915_private *i915 = ctx->i915;
+       int err = 0;
+
+       mutex_lock(&i915->contexts.mutex);
+
+       GEM_BUG_ON(i915_gem_context_is_closed(ctx));
+
+       if (list_empty(&ctx->hw_id_link)) {
+               GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count));
+
+               err = assign_hw_id(i915, &ctx->hw_id);
+               if (err)
+                       goto out_unlock;
+
+               list_add_tail(&ctx->hw_id_link, &i915->contexts.hw_id_list);
+       }
+
+       GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == ~0u);
+       atomic_inc(&ctx->hw_id_pin_count);
+
+out_unlock:
+       mutex_unlock(&i915->contexts.mutex);
+       return err;
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/mock_context.c"
 #include "selftests/i915_gem_context.c"
index b116e4942c10d13eb7e9771a4f50e737e47d185a..e09673ca731d37c185c6d655d95d5df372827df1 100644
@@ -134,8 +134,16 @@ struct i915_gem_context {
         * functions like fault reporting, PASID, scheduling. The
         * &drm_i915_private.context_hw_ida is used to assign a unique
         * id for the lifetime of the context.
+        *
+        * @hw_id_pin_count: - number of times this context has been pinned
+        * for use (should be, at most, once per engine).
+        *
+        * @hw_id_link: - all contexts with an assigned id are tracked
+        * for possible repossession.
         */
        unsigned int hw_id;
+       atomic_t hw_id_pin_count;
+       struct list_head hw_id_link;
 
        /**
         * @user_handle: userspace identifier
@@ -147,9 +155,6 @@ struct i915_gem_context {
 
        struct i915_sched_attr sched;
 
-       /** ggtt_offset_bias: placement restriction for context objects */
-       u32 ggtt_offset_bias;
-
        /** engine: per-engine logical HW state */
        struct intel_context {
                struct i915_gem_context *gem_context;
@@ -257,6 +262,21 @@ static inline void i915_gem_context_set_force_single_submission(struct i915_gem_
        __set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
 }
 
+int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx);
+static inline int i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
+{
+       if (atomic_inc_not_zero(&ctx->hw_id_pin_count))
+               return 0;
+
+       return __i915_gem_context_pin_hw_id(ctx);
+}
+
+static inline void i915_gem_context_unpin_hw_id(struct i915_gem_context *ctx)
+{
+       GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == 0u);
+       atomic_dec(&ctx->hw_id_pin_count);
+}
+
 static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
 {
        return c->user_handle == DEFAULT_CONTEXT_HANDLE;
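
i915_gem_context_pin_hw_id() above is a two-tier pin: a lock-free fast path that can only take a reference when one already exists, and a mutex-protected slow path (__i915_gem_context_pin_hw_id(), earlier in this diff) that assigns the id and publishes the first reference. C11 has no atomic_inc_not_zero(), but it is a short compare-and-swap loop; a hedged sketch with invented names:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <pthread.h>

    static pthread_mutex_t id_lock = PTHREAD_MUTEX_INITIALIZER;
    static atomic_uint pin_count;

    static bool inc_not_zero(atomic_uint *v)
    {
            unsigned int old = atomic_load(v);

            do {
                    if (old == 0)
                            return false;  /* unassigned: use slow path */
            } while (!atomic_compare_exchange_weak(v, &old, old + 1));

            return true;
    }

    static int pin_hw_id(void)
    {
            if (inc_not_zero(&pin_count))
                    return 0;              /* fast path, no lock taken */

            pthread_mutex_lock(&id_lock);
            /* ... assign the id on first pin ... */
            atomic_fetch_add(&pin_count, 1);
            pthread_mutex_unlock(&id_lock);
            return 0;
    }

Note that the driver's unpin side only decrements; the id itself is handed back lazily, by steal_hw_id() or release_hw_id().
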
index 3f0c612d42e786d44cff5c86b59bb1da27c0fea9..7d0b3a2c30e2e86bff42b2fe8492e245964e429c 100644
@@ -64,7 +64,9 @@ enum {
 #define BATCH_OFFSET_BIAS (256*1024)
 
 #define __I915_EXEC_ILLEGAL_FLAGS \
-       (__I915_EXEC_UNKNOWN_FLAGS | I915_EXEC_CONSTANTS_MASK)
+       (__I915_EXEC_UNKNOWN_FLAGS | \
+        I915_EXEC_CONSTANTS_MASK  | \
+        I915_EXEC_RESOURCE_STREAMER)
 
 /* Catch emission of unexpected errors for CI! */
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
@@ -733,7 +735,12 @@ static int eb_select_context(struct i915_execbuffer *eb)
                return -ENOENT;
 
        eb->ctx = ctx;
-       eb->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &eb->i915->ggtt.vm;
+       if (ctx->ppgtt) {
+               eb->vm = &ctx->ppgtt->vm;
+               eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
+       } else {
+               eb->vm = &eb->i915->ggtt.vm;
+       }
 
        eb->context_flags = 0;
        if (ctx->flags & CONTEXT_NO_ZEROMAP)
@@ -1120,6 +1127,13 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
        u32 *cmd;
        int err;
 
+       if (DBG_FORCE_RELOC == FORCE_GPU_RELOC) {
+               obj = vma->obj;
+               if (obj->cache_dirty & ~obj->cache_coherent)
+                       i915_gem_clflush_object(obj, 0);
+               obj->write_domain = 0;
+       }
+
        GEM_BUG_ON(vma->obj->write_domain & I915_GEM_DOMAIN_CPU);
 
        obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE);
@@ -1484,8 +1498,10 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
                                 * can read from this userspace address.
                                 */
                                offset = gen8_canonical_addr(offset & ~UPDATE);
-                               __put_user(offset,
-                                          &urelocs[r-stack].presumed_offset);
+                               if (unlikely(__put_user(offset, &urelocs[r-stack].presumed_offset))) {
+                                       remain = -EFAULT;
+                                       goto out;
+                               }
                        }
                } while (r++, --count);
                urelocs += ARRAY_SIZE(stack);
@@ -1570,7 +1586,6 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
 
                relocs = kvmalloc_array(size, 1, GFP_KERNEL);
                if (!relocs) {
-                       kvfree(relocs);
                        err = -ENOMEM;
                        goto err;
                }
@@ -1584,6 +1599,7 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
                        if (__copy_from_user((char *)relocs + copied,
                                             (char __user *)urelocs + copied,
                                             len)) {
+end_user:
                                kvfree(relocs);
                                err = -EFAULT;
                                goto err;
@@ -1607,7 +1623,6 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
                        unsafe_put_user(-1,
                                        &urelocs[copied].presumed_offset,
                                        end_user);
-end_user:
                user_access_end();
 
                eb->exec[i].relocs_ptr = (uintptr_t)relocs;
@@ -2199,8 +2214,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
        eb.flags = (unsigned int *)(eb.vma + args->buffer_count + 1);
 
        eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
-       if (USES_FULL_PPGTT(eb.i915))
-               eb.invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
        reloc_cache_init(&eb.reloc_cache, eb.i915);
 
        eb.buffer_count = args->buffer_count;
@@ -2221,20 +2234,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
        if (!eb.engine)
                return -EINVAL;
 
-       if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
-               if (!HAS_RESOURCE_STREAMER(eb.i915)) {
-                       DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
-                       return -EINVAL;
-               }
-               if (eb.engine->id != RCS) {
-                       DRM_DEBUG("RS is not available on %s\n",
-                                eb.engine->name);
-                       return -EINVAL;
-               }
-
-               eb.batch_flags |= I915_DISPATCH_RS;
-       }
-
        if (args->flags & I915_EXEC_FENCE_IN) {
                in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
                if (!in_fence)
index f00c7fbef79efc6886116e01dc023fe9008c78f4..eb0e446d648232458bc7ba0eca0771ec5dd3e66b 100644
@@ -173,19 +173,11 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
                return 0;
        }
 
-       /* Early VLV doesn't have this */
-       if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
-               DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
-               return 0;
-       }
-
-       if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
-               if (has_full_48bit_ppgtt)
-                       return 3;
+       if (has_full_48bit_ppgtt)
+               return 3;
 
-               if (has_full_ppgtt)
-                       return 2;
-       }
+       if (has_full_ppgtt)
+               return 2;
 
        return 1;
 }
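
With the pre-B3 VLV check and the HAS_LOGICAL_RING_CONTEXTS gate removed, intel_sanitize_enable_ppgtt() reduces to a straight capability ladder driven only by the has_full_ppgtt/has_full_48bit_ppgtt flags. The ladder isolated as a pure function (a sketch; names illustrative):

    #include <stdbool.h>

    /* 3 = full 48-bit PPGTT, 2 = full PPGTT, 1 = aliasing only. */
    static int ppgtt_level(bool full_48bit, bool full)
    {
            if (full_48bit)
                    return 3;
            if (full)
                    return 2;
            return 1;
    }
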
@@ -1259,9 +1251,6 @@ static void gen8_free_page_tables(struct i915_address_space *vm,
 {
        int i;
 
-       if (!px_page(pd))
-               return;
-
        for (i = 0; i < I915_PDES; i++) {
                if (pd->page_table[i] != vm->scratch_pt)
                        free_pt(vm, pd->page_table[i]);
@@ -2348,7 +2337,7 @@ static bool needs_idle_maps(struct drm_i915_private *dev_priv)
        return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active();
 }
 
-static void gen6_check_and_clear_faults(struct drm_i915_private *dev_priv)
+static void gen6_check_faults(struct drm_i915_private *dev_priv)
 {
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
@@ -2366,15 +2355,11 @@ static void gen6_check_and_clear_faults(struct drm_i915_private *dev_priv)
                                         fault & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
                                         RING_FAULT_SRCID(fault),
                                         RING_FAULT_FAULT_TYPE(fault));
-                       I915_WRITE(RING_FAULT_REG(engine),
-                                  fault & ~RING_FAULT_VALID);
                }
        }
-
-       POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
 }
 
-static void gen8_check_and_clear_faults(struct drm_i915_private *dev_priv)
+static void gen8_check_faults(struct drm_i915_private *dev_priv)
 {
        u32 fault = I915_READ(GEN8_RING_FAULT_REG);
 
@@ -2399,22 +2384,20 @@ static void gen8_check_and_clear_faults(struct drm_i915_private *dev_priv)
                                 GEN8_RING_FAULT_ENGINE_ID(fault),
                                 RING_FAULT_SRCID(fault),
                                 RING_FAULT_FAULT_TYPE(fault));
-               I915_WRITE(GEN8_RING_FAULT_REG,
-                          fault & ~RING_FAULT_VALID);
        }
-
-       POSTING_READ(GEN8_RING_FAULT_REG);
 }
 
 void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
 {
        /* From GEN8 onwards we only have one 'All Engine Fault Register' */
        if (INTEL_GEN(dev_priv) >= 8)
-               gen8_check_and_clear_faults(dev_priv);
+               gen8_check_faults(dev_priv);
        else if (INTEL_GEN(dev_priv) >= 6)
-               gen6_check_and_clear_faults(dev_priv);
+               gen6_check_faults(dev_priv);
        else
                return;
+
+       i915_clear_error_registers(dev_priv);
 }
 
 void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
@@ -2937,6 +2920,15 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
        struct drm_mm_node *entry;
        int ret;
 
+       /*
+        * GuC requires all resources that we're sharing with it to be placed in
+        * non-WOPCM memory. If GuC is not present or not in use we still need a
+        * small bias as ring wraparound at offset 0 sometimes hangs. No idea
+        * why.
+        */
+       ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
+                              intel_guc_reserved_gtt_size(&dev_priv->guc));
+
        ret = intel_vgt_balloon(dev_priv);
        if (ret)
                return ret;
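
This hunk replaces the per-context ggtt_offset_bias (deleted from __create_hw_context() earlier in this diff) with a single ggtt->pin_bias computed once at GGTT init: the larger of one GTT page, to dodge the ring-wraparound-at-offset-0 hang, and whatever range intel_guc_reserved_gtt_size() reports. The max_t() idiom in plain C (the sample sizes are invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define GTT_PAGE_SIZE 4096u

    static uint32_t compute_pin_bias(uint32_t guc_reserved)
    {
            /* max_t(u32, ...) analogue: take the larger lower bound. */
            return guc_reserved > GTT_PAGE_SIZE ? guc_reserved : GTT_PAGE_SIZE;
    }

    int main(void)
    {
            printf("%u\n", compute_pin_bias(0));        /* no GuC: 4096 */
            printf("%u\n", compute_pin_bias(1u << 20)); /* GuC reserving 1 MiB */
            return 0;
    }
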
@@ -3612,6 +3604,8 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
        mutex_lock(&dev_priv->drm.struct_mutex);
        i915_address_space_init(&ggtt->vm, dev_priv);
 
+       ggtt->vm.is_ggtt = true;
+
        /* Only VLV supports read-only GGTT mappings */
        ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv);
 
@@ -3662,6 +3656,10 @@ void i915_ggtt_enable_guc(struct drm_i915_private *i915)
 
 void i915_ggtt_disable_guc(struct drm_i915_private *i915)
 {
+       /* XXX Temporary pardon for error unload */
+       if (i915->ggtt.invalidate == gen6_ggtt_invalidate)
+               return;
+
        /* We should only be called after i915_ggtt_enable_guc() */
        GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate);
 
index 2a116a91420bc6fab6b31c3638a596bb64b61a33..7e2af5f4f39bcbb5ec355257d41decea7b45d019 100644
@@ -167,29 +167,22 @@ struct intel_rotation_info {
        } plane[2];
 } __packed;
 
-static inline void assert_intel_rotation_info_is_packed(void)
-{
-       BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 8*sizeof(unsigned int));
-}
-
 struct intel_partial_info {
        u64 offset;
        unsigned int size;
 } __packed;
 
-static inline void assert_intel_partial_info_is_packed(void)
-{
-       BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int));
-}
-
 enum i915_ggtt_view_type {
        I915_GGTT_VIEW_NORMAL = 0,
        I915_GGTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
        I915_GGTT_VIEW_PARTIAL = sizeof(struct intel_partial_info),
 };
 
-static inline void assert_i915_ggtt_view_type_is_unique(void)
+static inline void assert_i915_gem_gtt_types(void)
 {
+       BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 8*sizeof(unsigned int));
+       BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int));
+
        /* As we encode the size of each branch inside the union into its type,
         * we have to be careful that each branch has a unique size.
         */
@@ -229,7 +222,6 @@ struct i915_page_dma {
 };
 
 #define px_base(px) (&(px)->base)
-#define px_page(px) (px_base(px)->page)
 #define px_dma(px) (px_base(px)->daddr)
 
 struct i915_page_table {
@@ -332,6 +324,9 @@ struct i915_address_space {
 
        struct pagestash free_pages;
 
+       /* Global GTT */
+       bool is_ggtt:1;
+
        /* Some systems require uncached updates of the page directories */
        bool pt_kmap_wc:1;
 
@@ -365,7 +360,7 @@ struct i915_address_space {
        I915_SELFTEST_DECLARE(bool scrub_64K);
 };
 
-#define i915_is_ggtt(V) (!(V)->file)
+#define i915_is_ggtt(vm) ((vm)->is_ggtt)
 
 static inline bool
 i915_vm_is_48bit(const struct i915_address_space *vm)
@@ -401,6 +396,8 @@ struct i915_ggtt {
 
        int mtrr;
 
+       u32 pin_bias;
+
        struct drm_mm_node error_capture;
 };
 
index 83e5e01fa9eaa9c8586445329089959290f2f32d..a6dd7c46de0dddb4a8f2c09a21651ac1c1a8cb21 100644
@@ -421,19 +421,19 @@ i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
 }
 
 static inline unsigned int
-i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
+i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
 {
        return obj->tiling_and_stride & TILING_MASK;
 }
 
 static inline bool
-i915_gem_object_is_tiled(struct drm_i915_gem_object *obj)
+i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
 {
        return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
 }
 
 static inline unsigned int
-i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
+i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
 {
        return obj->tiling_and_stride & STRIDE_MASK;
 }
@@ -446,13 +446,13 @@ i915_gem_tile_height(unsigned int tiling)
 }
 
 static inline unsigned int
-i915_gem_object_get_tile_height(struct drm_i915_gem_object *obj)
+i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
 {
        return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
 }
 
 static inline unsigned int
-i915_gem_object_get_tile_row_size(struct drm_i915_gem_object *obj)
+i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
 {
        return (i915_gem_object_get_stride(obj) *
                i915_gem_object_get_tile_height(obj));
index 90628a47ae17f81312dff51ddbc89aff4af55654..10f28a2ee2e6f5ac93f5a144b4fda52f4a2f78c0 100644
@@ -478,7 +478,7 @@ void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
 void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
 {
        spin_lock_irq(&dev_priv->irq_lock);
-       gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
+       gen6_reset_pm_iir(dev_priv, GEN6_PM_RPS_EVENTS);
        dev_priv->gt_pm.rps.pm_iir = 0;
        spin_unlock_irq(&dev_priv->irq_lock);
 }
@@ -516,7 +516,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
 
        I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
 
-       gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+       gen6_disable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
 
        spin_unlock_irq(&dev_priv->irq_lock);
        synchronize_irq(dev_priv->drm.irq);
@@ -1534,11 +1534,8 @@ static void gen8_gt_irq_ack(struct drm_i915_private *i915,
 
        if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
                gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
-               if (likely(gt_iir[2] & (i915->pm_rps_events |
-                                       i915->pm_guc_events)))
-                       raw_reg_write(regs, GEN8_GT_IIR(2),
-                                     gt_iir[2] & (i915->pm_rps_events |
-                                                  i915->pm_guc_events));
+               if (likely(gt_iir[2]))
+                       raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]);
        }
 
        if (master_ctl & GEN8_GT_VECS_IRQ) {
@@ -3218,7 +3215,7 @@ static void i915_reset_device(struct drm_i915_private *dev_priv,
                kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
 }
 
-static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
+void i915_clear_error_registers(struct drm_i915_private *dev_priv)
 {
        u32 eir;
 
@@ -3241,6 +3238,22 @@ static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
                I915_WRITE(EMR, I915_READ(EMR) | eir);
                I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT);
        }
+
+       if (INTEL_GEN(dev_priv) >= 8) {
+               I915_WRITE(GEN8_RING_FAULT_REG,
+                          I915_READ(GEN8_RING_FAULT_REG) & ~RING_FAULT_VALID);
+               POSTING_READ(GEN8_RING_FAULT_REG);
+       } else if (INTEL_GEN(dev_priv) >= 6) {
+               struct intel_engine_cs *engine;
+               enum intel_engine_id id;
+
+               for_each_engine(engine, dev_priv, id) {
+                       I915_WRITE(RING_FAULT_REG(engine),
+                                  I915_READ(RING_FAULT_REG(engine)) &
+                                  ~RING_FAULT_VALID);
+               }
+               POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
+       }
 }
 
 /**
@@ -3296,7 +3309,8 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
         * Try engine reset when available. We fall back to full reset if
         * single reset fails.
         */
-       if (intel_has_reset_engine(dev_priv)) {
+       if (intel_has_reset_engine(dev_priv) &&
+           !i915_terminally_wedged(&dev_priv->gpu_error)) {
                for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
                        BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
                        if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
@@ -4781,7 +4795,9 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
                /* WaGsvRC0ResidencyMethod:vlv */
                dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
        else
-               dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
+               dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD |
+                                          GEN6_PM_RP_DOWN_THRESHOLD |
+                                          GEN6_PM_RP_DOWN_TIMEOUT);
 
        rps->pm_intrmsk_mbz = 0;
 
index 6a4d1388ad2d39b2f972e0d1e1270bab8d3ce8df..d6f7b9fe1d261fcaa4630f88659427deca873bf8 100644
@@ -74,6 +74,7 @@
        .unfenced_needs_alignment = 1, \
        .ring_mask = RENDER_RING, \
        .has_snoop = true, \
+       .has_coherent_ggtt = false, \
        GEN_DEFAULT_PIPEOFFSETS, \
        GEN_DEFAULT_PAGE_SIZES, \
        CURSOR_OFFSETS
@@ -110,6 +111,7 @@ static const struct intel_device_info intel_i865g_info = {
        .has_gmch_display = 1, \
        .ring_mask = RENDER_RING, \
        .has_snoop = true, \
+       .has_coherent_ggtt = true, \
        GEN_DEFAULT_PIPEOFFSETS, \
        GEN_DEFAULT_PAGE_SIZES, \
        CURSOR_OFFSETS
@@ -117,6 +119,7 @@ static const struct intel_device_info intel_i865g_info = {
 static const struct intel_device_info intel_i915g_info = {
        GEN3_FEATURES,
        PLATFORM(INTEL_I915G),
+       .has_coherent_ggtt = false,
        .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .hws_needs_physical = 1,
@@ -178,6 +181,7 @@ static const struct intel_device_info intel_pineview_info = {
        .has_gmch_display = 1, \
        .ring_mask = RENDER_RING, \
        .has_snoop = true, \
+       .has_coherent_ggtt = true, \
        GEN_DEFAULT_PIPEOFFSETS, \
        GEN_DEFAULT_PAGE_SIZES, \
        CURSOR_OFFSETS
@@ -220,6 +224,7 @@ static const struct intel_device_info intel_gm45_info = {
        .has_hotplug = 1, \
        .ring_mask = RENDER_RING | BSD_RING, \
        .has_snoop = true, \
+       .has_coherent_ggtt = true, \
        /* ilk does support rc6, but we do not implement [power] contexts */ \
        .has_rc6 = 0, \
        GEN_DEFAULT_PIPEOFFSETS, \
@@ -243,6 +248,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
        .has_hotplug = 1, \
        .has_fbc = 1, \
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+       .has_coherent_ggtt = true, \
        .has_llc = 1, \
        .has_rc6 = 1, \
        .has_rc6p = 1, \
@@ -287,6 +293,7 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
        .has_hotplug = 1, \
        .has_fbc = 1, \
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+       .has_coherent_ggtt = true, \
        .has_llc = 1, \
        .has_rc6 = 1, \
        .has_rc6p = 1, \
@@ -347,6 +354,7 @@ static const struct intel_device_info intel_valleyview_info = {
        .has_aliasing_ppgtt = 1,
        .has_full_ppgtt = 1,
        .has_snoop = true,
+       .has_coherent_ggtt = false,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
        .display_mmio_offset = VLV_DISPLAY_BASE,
        GEN_DEFAULT_PAGE_SIZES,
@@ -360,7 +368,6 @@ static const struct intel_device_info intel_valleyview_info = {
        .has_ddi = 1, \
        .has_fpga_dbg = 1, \
        .has_psr = 1, \
-       .has_resource_streamer = 1, \
        .has_dp_mst = 1, \
        .has_rc6p = 0 /* RC6p removed by HSW */, \
        .has_runtime_pm = 1
@@ -433,7 +440,6 @@ static const struct intel_device_info intel_cherryview_info = {
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        .has_64bit_reloc = 1,
        .has_runtime_pm = 1,
-       .has_resource_streamer = 1,
        .has_rc6 = 1,
        .has_logical_ring_contexts = 1,
        .has_gmch_display = 1,
@@ -441,6 +447,7 @@ static const struct intel_device_info intel_cherryview_info = {
        .has_full_ppgtt = 1,
        .has_reset_engine = 1,
        .has_snoop = true,
+       .has_coherent_ggtt = false,
        .display_mmio_offset = VLV_DISPLAY_BASE,
        GEN_DEFAULT_PAGE_SIZES,
        GEN_CHV_PIPEOFFSETS,
@@ -506,7 +513,6 @@ static const struct intel_device_info intel_skylake_gt4_info = {
        .has_runtime_pm = 1, \
        .has_pooled_eu = 0, \
        .has_csr = 1, \
-       .has_resource_streamer = 1, \
        .has_rc6 = 1, \
        .has_dp_mst = 1, \
        .has_logical_ring_contexts = 1, \
@@ -517,6 +523,7 @@ static const struct intel_device_info intel_skylake_gt4_info = {
        .has_full_48bit_ppgtt = 1, \
        .has_reset_engine = 1, \
        .has_snoop = true, \
+       .has_coherent_ggtt = false, \
        .has_ipc = 1, \
        GEN9_DEFAULT_PAGE_SIZES, \
        GEN_DEFAULT_PIPEOFFSETS, \
@@ -580,6 +587,7 @@ static const struct intel_device_info intel_coffeelake_gt3_info = {
        GEN9_FEATURES, \
        GEN(10), \
        .ddb_size = 1024, \
+       .has_coherent_ggtt = false, \
        GLK_COLORS
 
 static const struct intel_device_info intel_cannonlake_info = {
@@ -592,14 +600,12 @@ static const struct intel_device_info intel_cannonlake_info = {
        GEN10_FEATURES, \
        GEN(11), \
        .ddb_size = 2048, \
-       .has_csr = 0, \
        .has_logical_ring_elsq = 1
 
 static const struct intel_device_info intel_icelake_11_info = {
        GEN11_FEATURES,
        PLATFORM(INTEL_ICELAKE),
        .is_alpha_support = 1,
-       .has_resource_streamer = 0,
        .ring_mask = RENDER_RING | BLT_RING | VEBOX_RING | BSD_RING | BSD3_RING,
 };
 
index 6bf10952c7240363fdcd2df907979ef9aef0d247..ccb20230df2c456d038ced6305b28b44d25916f7 100644
 #include "i915_oa_cflgt3.h"
 #include "i915_oa_cnl.h"
 #include "i915_oa_icl.h"
+#include "intel_lrc_reg.h"
 
 /* HW requires this to be a power of two, between 128k and 16M, though driver
  * is currently generally designed assuming the largest 16M size is used such
@@ -1338,14 +1339,12 @@ free_oa_buffer(struct drm_i915_private *i915)
 {
        mutex_lock(&i915->drm.struct_mutex);
 
-       i915_gem_object_unpin_map(i915->perf.oa.oa_buffer.vma->obj);
-       i915_vma_unpin(i915->perf.oa.oa_buffer.vma);
-       i915_gem_object_put(i915->perf.oa.oa_buffer.vma->obj);
-
-       i915->perf.oa.oa_buffer.vma = NULL;
-       i915->perf.oa.oa_buffer.vaddr = NULL;
+       i915_vma_unpin_and_release(&i915->perf.oa.oa_buffer.vma,
+                                  I915_VMA_RELEASE_MAP);
 
        mutex_unlock(&i915->drm.struct_mutex);
+
+       i915->perf.oa.oa_buffer.vaddr = NULL;
 }
 
 static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
@@ -1638,27 +1637,25 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
        u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
        u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
        /* The MMIO offsets for Flex EU registers aren't contiguous */
-       u32 flex_mmio[] = {
-               i915_mmio_reg_offset(EU_PERF_CNTL0),
-               i915_mmio_reg_offset(EU_PERF_CNTL1),
-               i915_mmio_reg_offset(EU_PERF_CNTL2),
-               i915_mmio_reg_offset(EU_PERF_CNTL3),
-               i915_mmio_reg_offset(EU_PERF_CNTL4),
-               i915_mmio_reg_offset(EU_PERF_CNTL5),
-               i915_mmio_reg_offset(EU_PERF_CNTL6),
+       i915_reg_t flex_regs[] = {
+               EU_PERF_CNTL0,
+               EU_PERF_CNTL1,
+               EU_PERF_CNTL2,
+               EU_PERF_CNTL3,
+               EU_PERF_CNTL4,
+               EU_PERF_CNTL5,
+               EU_PERF_CNTL6,
        };
        int i;
 
-       reg_state[ctx_oactxctrl] = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
-       reg_state[ctx_oactxctrl+1] = (dev_priv->perf.oa.period_exponent <<
-                                     GEN8_OA_TIMER_PERIOD_SHIFT) |
-                                    (dev_priv->perf.oa.periodic ?
-                                     GEN8_OA_TIMER_ENABLE : 0) |
-                                    GEN8_OA_COUNTER_RESUME;
+       CTX_REG(reg_state, ctx_oactxctrl, GEN8_OACTXCONTROL,
+               (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
+               (dev_priv->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
+               GEN8_OA_COUNTER_RESUME);
 
-       for (i = 0; i < ARRAY_SIZE(flex_mmio); i++) {
+       for (i = 0; i < ARRAY_SIZE(flex_regs); i++) {
                u32 state_offset = ctx_flexeu0 + i * 2;
-               u32 mmio = flex_mmio[i];
+               u32 mmio = i915_mmio_reg_offset(flex_regs[i]);
 
                /*
                 * This arbitrary default will select the 'EU FPU0 Pipeline
@@ -1678,8 +1675,7 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
                        }
                }
 
-               reg_state[state_offset] = mmio;
-               reg_state[state_offset+1] = value;
+               CTX_REG(reg_state, state_offset, flex_regs[i], value);
        }
 }
 
@@ -1821,7 +1817,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
        /* Switch away from any user context. */
        ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config);
        if (ret)
-               goto out;
+               return ret;
 
        /*
         * The OA register config is setup through the context image. This image
@@ -1840,7 +1836,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
                                     wait_flags,
                                     MAX_SCHEDULE_TIMEOUT);
        if (ret)
-               goto out;
+               return ret;
 
        /* Update all contexts now that we've stalled the submission. */
        list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
@@ -1852,10 +1848,8 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
                        continue;
 
                regs = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
-               if (IS_ERR(regs)) {
-                       ret = PTR_ERR(regs);
-                       goto out;
-               }
+               if (IS_ERR(regs))
+                       return PTR_ERR(regs);
 
                ce->state->obj->mm.dirty = true;
                regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);
@@ -1865,7 +1859,6 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
                i915_gem_object_unpin_map(ce->state->obj);
        }
 
- out:
        return ret;
 }
 
index 08ec7446282e7f981f74272e8c29c755abad9e21..09bc8e730ee1260553ecdf30a0ed170f6e3fc968 100644
@@ -344,6 +344,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   GEN8_RPCS_S_CNT_ENABLE       (1 << 18)
 #define   GEN8_RPCS_S_CNT_SHIFT                15
 #define   GEN8_RPCS_S_CNT_MASK         (0x7 << GEN8_RPCS_S_CNT_SHIFT)
+#define   GEN11_RPCS_S_CNT_SHIFT       12
+#define   GEN11_RPCS_S_CNT_MASK                (0x3f << GEN11_RPCS_S_CNT_SHIFT)
 #define   GEN8_RPCS_SS_CNT_ENABLE      (1 << 11)
 #define   GEN8_RPCS_SS_CNT_SHIFT       8
 #define   GEN8_RPCS_SS_CNT_MASK                (0x7 << GEN8_RPCS_SS_CNT_SHIFT)
@@ -1029,126 +1031,43 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 /*
  * i915_power_well_id:
  *
- * Platform specific IDs used to look up power wells and - except for custom
- * power wells - to define request/status register flag bit positions. As such
- * the set of IDs on a given platform must be unique and except for custom
- * power wells their value must stay fixed.
+ * IDs used to look up power wells. Power wells accessed directly bypassing
+ * the power domains framework must be assigned a unique ID. The rest of power
+ * wells must be assigned DISP_PW_ID_NONE.
  */
 enum i915_power_well_id {
-       /*
-        * I830
-        *  - custom power well
-        */
-       I830_DISP_PW_PIPES = 0,
-
-       /*
-        * VLV/CHV
-        *  - PUNIT_REG_PWRGT_CTRL (bit: id*2),
-        *    PUNIT_REG_PWRGT_STATUS (bit: id*2) (PUNIT HAS v0.8)
-        */
-       PUNIT_POWER_WELL_RENDER                 = 0,
-       PUNIT_POWER_WELL_MEDIA                  = 1,
-       PUNIT_POWER_WELL_DISP2D                 = 3,
-       PUNIT_POWER_WELL_DPIO_CMN_BC            = 5,
-       PUNIT_POWER_WELL_DPIO_TX_B_LANES_01     = 6,
-       PUNIT_POWER_WELL_DPIO_TX_B_LANES_23     = 7,
-       PUNIT_POWER_WELL_DPIO_TX_C_LANES_01     = 8,
-       PUNIT_POWER_WELL_DPIO_TX_C_LANES_23     = 9,
-       PUNIT_POWER_WELL_DPIO_RX0               = 10,
-       PUNIT_POWER_WELL_DPIO_RX1               = 11,
-       PUNIT_POWER_WELL_DPIO_CMN_D             = 12,
-       /*  - custom power well */
-       CHV_DISP_PW_PIPE_A,                     /* 13 */
-
-       /*
-        * HSW/BDW
-        *  - _HSW_PWR_WELL_CTL1-4 (status bit: id*2, req bit: id*2+1)
-        */
-       HSW_DISP_PW_GLOBAL = 15,
-
-       /*
-        * GEN9+
-        *  - _HSW_PWR_WELL_CTL1-4 (status bit: id*2, req bit: id*2+1)
-        */
-       SKL_DISP_PW_MISC_IO = 0,
-       SKL_DISP_PW_DDI_A_E,
-       GLK_DISP_PW_DDI_A = SKL_DISP_PW_DDI_A_E,
-       CNL_DISP_PW_DDI_A = SKL_DISP_PW_DDI_A_E,
-       SKL_DISP_PW_DDI_B,
-       SKL_DISP_PW_DDI_C,
-       SKL_DISP_PW_DDI_D,
-       CNL_DISP_PW_DDI_F = 6,
-
-       GLK_DISP_PW_AUX_A = 8,
-       GLK_DISP_PW_AUX_B,
-       GLK_DISP_PW_AUX_C,
-       CNL_DISP_PW_AUX_A = GLK_DISP_PW_AUX_A,
-       CNL_DISP_PW_AUX_B = GLK_DISP_PW_AUX_B,
-       CNL_DISP_PW_AUX_C = GLK_DISP_PW_AUX_C,
-       CNL_DISP_PW_AUX_D,
-       CNL_DISP_PW_AUX_F,
-
-       SKL_DISP_PW_1 = 14,
+       DISP_PW_ID_NONE,
+
+       VLV_DISP_PW_DISP2D,
+       BXT_DISP_PW_DPIO_CMN_A,
+       VLV_DISP_PW_DPIO_CMN_BC,
+       GLK_DISP_PW_DPIO_CMN_C,
+       CHV_DISP_PW_DPIO_CMN_D,
+       HSW_DISP_PW_GLOBAL,
+       SKL_DISP_PW_MISC_IO,
+       SKL_DISP_PW_1,
        SKL_DISP_PW_2,
-
-       /* - custom power wells */
-       BXT_DPIO_CMN_A,
-       BXT_DPIO_CMN_BC,
-       GLK_DPIO_CMN_C,                 /* 18 */
-
-       /*
-        * GEN11+
-        *  - _HSW_PWR_WELL_CTL1-4
-        *    (status bit: (id&15)*2, req bit:(id&15)*2+1)
-        */
-       ICL_DISP_PW_1 = 0,
-       ICL_DISP_PW_2,
-       ICL_DISP_PW_3,
-       ICL_DISP_PW_4,
-
-       /*
-        *  - _HSW_PWR_WELL_CTL_AUX1/2/4
-        *    (status bit: (id&15)*2, req bit:(id&15)*2+1)
-        */
-       ICL_DISP_PW_AUX_A = 16,
-       ICL_DISP_PW_AUX_B,
-       ICL_DISP_PW_AUX_C,
-       ICL_DISP_PW_AUX_D,
-       ICL_DISP_PW_AUX_E,
-       ICL_DISP_PW_AUX_F,
-
-       ICL_DISP_PW_AUX_TBT1 = 24,
-       ICL_DISP_PW_AUX_TBT2,
-       ICL_DISP_PW_AUX_TBT3,
-       ICL_DISP_PW_AUX_TBT4,
-
-       /*
-        *  - _HSW_PWR_WELL_CTL_DDI1/2/4
-        *    (status bit: (id&15)*2, req bit:(id&15)*2+1)
-        */
-       ICL_DISP_PW_DDI_A = 32,
-       ICL_DISP_PW_DDI_B,
-       ICL_DISP_PW_DDI_C,
-       ICL_DISP_PW_DDI_D,
-       ICL_DISP_PW_DDI_E,
-       ICL_DISP_PW_DDI_F,                      /* 37 */
-
-       /*
-        * Multiple platforms.
-        * Must start following the highest ID of any platform.
-        * - custom power wells
-        */
-       SKL_DISP_PW_DC_OFF = 38,
-       I915_DISP_PW_ALWAYS_ON,
 };
 
 #define PUNIT_REG_PWRGT_CTRL                   0x60
 #define PUNIT_REG_PWRGT_STATUS                 0x61
-#define   PUNIT_PWRGT_MASK(power_well)         (3 << ((power_well) * 2))
-#define   PUNIT_PWRGT_PWR_ON(power_well)       (0 << ((power_well) * 2))
-#define   PUNIT_PWRGT_CLK_GATE(power_well)     (1 << ((power_well) * 2))
-#define   PUNIT_PWRGT_RESET(power_well)                (2 << ((power_well) * 2))
-#define   PUNIT_PWRGT_PWR_GATE(power_well)     (3 << ((power_well) * 2))
+#define   PUNIT_PWRGT_MASK(pw_idx)             (3 << ((pw_idx) * 2))
+#define   PUNIT_PWRGT_PWR_ON(pw_idx)           (0 << ((pw_idx) * 2))
+#define   PUNIT_PWRGT_CLK_GATE(pw_idx)         (1 << ((pw_idx) * 2))
+#define   PUNIT_PWRGT_RESET(pw_idx)            (2 << ((pw_idx) * 2))
+#define   PUNIT_PWRGT_PWR_GATE(pw_idx)         (3 << ((pw_idx) * 2))
+
+#define PUNIT_PWGT_IDX_RENDER                  0
+#define PUNIT_PWGT_IDX_MEDIA                   1
+#define PUNIT_PWGT_IDX_DISP2D                  3
+#define PUNIT_PWGT_IDX_DPIO_CMN_BC             5
+#define PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01      6
+#define PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23      7
+#define PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01      8
+#define PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23      9
+#define PUNIT_PWGT_IDX_DPIO_RX0                        10
+#define PUNIT_PWGT_IDX_DPIO_RX1                        11
+#define PUNIT_PWGT_IDX_DPIO_CMN_D              12
 
 #define PUNIT_REG_GPU_LFM                      0xd3
 #define PUNIT_REG_GPU_FREQ_REQ                 0xd4
@@ -1932,121 +1851,200 @@ enum i915_power_well_id {
 #define   N_SCALAR(x)                  ((x) << 24)
 #define   N_SCALAR_MASK                        (0x7F << 24)
 
-#define _ICL_MG_PHY_PORT_LN(port, ln, ln0p1, ln0p2, ln1p1) \
+#define MG_PHY_PORT_LN(port, ln, ln0p1, ln0p2, ln1p1) \
        _MMIO(_PORT((port) - PORT_C, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1)))
 
-#define _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT1            0x16812C
-#define _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT1            0x16852C
-#define _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT2            0x16912C
-#define _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT2            0x16952C
-#define _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT3            0x16A12C
-#define _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT3            0x16A52C
-#define _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT4            0x16B12C
-#define _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT4            0x16B52C
-#define ICL_PORT_MG_TX1_LINK_PARAMS(port, ln) \
-       _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT1, \
-                                     _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT2, \
-                                     _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT1)
-
-#define _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT1            0x1680AC
-#define _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT1            0x1684AC
-#define _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT2            0x1690AC
-#define _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT2            0x1694AC
-#define _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT3            0x16A0AC
-#define _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT3            0x16A4AC
-#define _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT4            0x16B0AC
-#define _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT4            0x16B4AC
-#define ICL_PORT_MG_TX2_LINK_PARAMS(port, ln) \
-       _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT1, \
-                                     _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT2, \
-                                     _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT1)
-#define CRI_USE_FS32                   (1 << 5)
-
-#define _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT1          0x16814C
-#define _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT1          0x16854C
-#define _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT2          0x16914C
-#define _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT2          0x16954C
-#define _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT3          0x16A14C
-#define _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT3          0x16A54C
-#define _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT4          0x16B14C
-#define _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT4          0x16B54C
-#define ICL_PORT_MG_TX1_PISO_READLOAD(port, ln) \
-       _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT1, \
-                                     _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT2, \
-                                     _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT1)
-
-#define _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT1          0x1680CC
-#define _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT1          0x1684CC
-#define _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT2          0x1690CC
-#define _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT2          0x1694CC
-#define _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT3          0x16A0CC
-#define _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT3          0x16A4CC
-#define _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT4          0x16B0CC
-#define _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT4          0x16B4CC
-#define ICL_PORT_MG_TX2_PISO_READLOAD(port, ln) \
-       _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT1, \
-                                     _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT2, \
-                                     _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT1)
-#define CRI_CALCINIT                                   (1 << 1)
-
-#define _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT1              0x168148
-#define _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT1              0x168548
-#define _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT2              0x169148
-#define _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT2              0x169548
-#define _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT3              0x16A148
-#define _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT3              0x16A548
-#define _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT4              0x16B148
-#define _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT4              0x16B548
-#define ICL_PORT_MG_TX1_SWINGCTRL(port, ln) \
-       _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT1, \
-                                     _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT2, \
-                                     _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT1)
-
-#define _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT1              0x1680C8
-#define _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT1              0x1684C8
-#define _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT2              0x1690C8
-#define _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT2              0x1694C8
-#define _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT3              0x16A0C8
-#define _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT3              0x16A4C8
-#define _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT4              0x16B0C8
-#define _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT4              0x16B4C8
-#define ICL_PORT_MG_TX2_SWINGCTRL(port, ln) \
-       _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT1, \
-                                     _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT2, \
-                                     _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT1)
-#define CRI_TXDEEMPH_OVERRIDE_17_12(x)                 ((x) << 0)
-#define CRI_TXDEEMPH_OVERRIDE_17_12_MASK               (0x3F << 0)
-
-#define _ICL_MG_TX_DRVCTRL_TX1LN0_PORT1                        0x168144
-#define _ICL_MG_TX_DRVCTRL_TX1LN1_PORT1                        0x168544
-#define _ICL_MG_TX_DRVCTRL_TX1LN0_PORT2                        0x169144
-#define _ICL_MG_TX_DRVCTRL_TX1LN1_PORT2                        0x169544
-#define _ICL_MG_TX_DRVCTRL_TX1LN0_PORT3                        0x16A144
-#define _ICL_MG_TX_DRVCTRL_TX1LN1_PORT3                        0x16A544
-#define _ICL_MG_TX_DRVCTRL_TX1LN0_PORT4                        0x16B144
-#define _ICL_MG_TX_DRVCTRL_TX1LN1_PORT4                        0x16B544
-#define ICL_PORT_MG_TX1_DRVCTRL(port, ln) \
-       _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_DRVCTRL_TX1LN0_PORT1, \
-                                     _ICL_MG_TX_DRVCTRL_TX1LN0_PORT2, \
-                                     _ICL_MG_TX_DRVCTRL_TX1LN1_PORT1)
-
-#define _ICL_MG_TX_DRVCTRL_TX2LN0_PORT1                        0x1680C4
-#define _ICL_MG_TX_DRVCTRL_TX2LN1_PORT1                        0x1684C4
-#define _ICL_MG_TX_DRVCTRL_TX2LN0_PORT2                        0x1690C4
-#define _ICL_MG_TX_DRVCTRL_TX2LN1_PORT2                        0x1694C4
-#define _ICL_MG_TX_DRVCTRL_TX2LN0_PORT3                        0x16A0C4
-#define _ICL_MG_TX_DRVCTRL_TX2LN1_PORT3                        0x16A4C4
-#define _ICL_MG_TX_DRVCTRL_TX2LN0_PORT4                        0x16B0C4
-#define _ICL_MG_TX_DRVCTRL_TX2LN1_PORT4                        0x16B4C4
-#define ICL_PORT_MG_TX2_DRVCTRL(port, ln) \
-       _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_DRVCTRL_TX2LN0_PORT1, \
-                                     _ICL_MG_TX_DRVCTRL_TX2LN0_PORT2, \
-                                     _ICL_MG_TX_DRVCTRL_TX2LN1_PORT1)
-#define CRI_TXDEEMPH_OVERRIDE_11_6(x)                  ((x) << 24)
-#define CRI_TXDEEMPH_OVERRIDE_11_6_MASK                        (0x3F << 24)
-#define CRI_TXDEEMPH_OVERRIDE_EN                       (1 << 22)
-#define CRI_TXDEEMPH_OVERRIDE_5_0(x)                   ((x) << 16)
-#define CRI_TXDEEMPH_OVERRIDE_5_0_MASK                 (0x3F << 16)
+#define MG_TX_LINK_PARAMS_TX1LN0_PORT1         0x16812C
+#define MG_TX_LINK_PARAMS_TX1LN1_PORT1         0x16852C
+#define MG_TX_LINK_PARAMS_TX1LN0_PORT2         0x16912C
+#define MG_TX_LINK_PARAMS_TX1LN1_PORT2         0x16952C
+#define MG_TX_LINK_PARAMS_TX1LN0_PORT3         0x16A12C
+#define MG_TX_LINK_PARAMS_TX1LN1_PORT3         0x16A52C
+#define MG_TX_LINK_PARAMS_TX1LN0_PORT4         0x16B12C
+#define MG_TX_LINK_PARAMS_TX1LN1_PORT4         0x16B52C
+#define MG_TX1_LINK_PARAMS(port, ln) \
+       MG_PHY_PORT_LN(port, ln, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \
+                                MG_TX_LINK_PARAMS_TX1LN0_PORT2, \
+                                MG_TX_LINK_PARAMS_TX1LN1_PORT1)
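
A quick spot check of the addressing scheme above (a sketch; it assumes the header's usual _PORT(idx, a, b) helper expands to (a) + (idx) * ((b) - (a))):

/* Expanding MG_TX1_LINK_PARAMS(PORT_D, 1) by hand:
 *   port step: (PORT_D - PORT_C) * (0x16912C - 0x16812C) = 1 * 0x1000
 *   lane step: 1 * (0x16852C - 0x16812C)                 = 0x400
 *   0x16812C + 0x1000 + 0x400                            = 0x16952C
 * i.e. exactly MG_TX_LINK_PARAMS_TX1LN1_PORT2 -- ports are spaced
 * 0x1000 apart and lanes 0x400 apart, so three anchor addresses
 * determine the whole table.
 */
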
+
+#define MG_TX_LINK_PARAMS_TX2LN0_PORT1         0x1680AC
+#define MG_TX_LINK_PARAMS_TX2LN1_PORT1         0x1684AC
+#define MG_TX_LINK_PARAMS_TX2LN0_PORT2         0x1690AC
+#define MG_TX_LINK_PARAMS_TX2LN1_PORT2         0x1694AC
+#define MG_TX_LINK_PARAMS_TX2LN0_PORT3         0x16A0AC
+#define MG_TX_LINK_PARAMS_TX2LN1_PORT3         0x16A4AC
+#define MG_TX_LINK_PARAMS_TX2LN0_PORT4         0x16B0AC
+#define MG_TX_LINK_PARAMS_TX2LN1_PORT4         0x16B4AC
+#define MG_TX2_LINK_PARAMS(port, ln) \
+       MG_PHY_PORT_LN(port, ln, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \
+                                MG_TX_LINK_PARAMS_TX2LN0_PORT2, \
+                                MG_TX_LINK_PARAMS_TX2LN1_PORT1)
+#define   CRI_USE_FS32                 (1 << 5)
+
+#define MG_TX_PISO_READLOAD_TX1LN0_PORT1               0x16814C
+#define MG_TX_PISO_READLOAD_TX1LN1_PORT1               0x16854C
+#define MG_TX_PISO_READLOAD_TX1LN0_PORT2               0x16914C
+#define MG_TX_PISO_READLOAD_TX1LN1_PORT2               0x16954C
+#define MG_TX_PISO_READLOAD_TX1LN0_PORT3               0x16A14C
+#define MG_TX_PISO_READLOAD_TX1LN1_PORT3               0x16A54C
+#define MG_TX_PISO_READLOAD_TX1LN0_PORT4               0x16B14C
+#define MG_TX_PISO_READLOAD_TX1LN1_PORT4               0x16B54C
+#define MG_TX1_PISO_READLOAD(port, ln) \
+       MG_PHY_PORT_LN(port, ln, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \
+                                MG_TX_PISO_READLOAD_TX1LN0_PORT2, \
+                                MG_TX_PISO_READLOAD_TX1LN1_PORT1)
+
+#define MG_TX_PISO_READLOAD_TX2LN0_PORT1               0x1680CC
+#define MG_TX_PISO_READLOAD_TX2LN1_PORT1               0x1684CC
+#define MG_TX_PISO_READLOAD_TX2LN0_PORT2               0x1690CC
+#define MG_TX_PISO_READLOAD_TX2LN1_PORT2               0x1694CC
+#define MG_TX_PISO_READLOAD_TX2LN0_PORT3               0x16A0CC
+#define MG_TX_PISO_READLOAD_TX2LN1_PORT3               0x16A4CC
+#define MG_TX_PISO_READLOAD_TX2LN0_PORT4               0x16B0CC
+#define MG_TX_PISO_READLOAD_TX2LN1_PORT4               0x16B4CC
+#define MG_TX2_PISO_READLOAD(port, ln) \
+       MG_PHY_PORT_LN(port, ln, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \
+                                MG_TX_PISO_READLOAD_TX2LN0_PORT2, \
+                                MG_TX_PISO_READLOAD_TX2LN1_PORT1)
+#define   CRI_CALCINIT                                 (1 << 1)
+
+#define MG_TX_SWINGCTRL_TX1LN0_PORT1           0x168148
+#define MG_TX_SWINGCTRL_TX1LN1_PORT1           0x168548
+#define MG_TX_SWINGCTRL_TX1LN0_PORT2           0x169148
+#define MG_TX_SWINGCTRL_TX1LN1_PORT2           0x169548
+#define MG_TX_SWINGCTRL_TX1LN0_PORT3           0x16A148
+#define MG_TX_SWINGCTRL_TX1LN1_PORT3           0x16A548
+#define MG_TX_SWINGCTRL_TX1LN0_PORT4           0x16B148
+#define MG_TX_SWINGCTRL_TX1LN1_PORT4           0x16B548
+#define MG_TX1_SWINGCTRL(port, ln) \
+       MG_PHY_PORT_LN(port, ln, MG_TX_SWINGCTRL_TX1LN0_PORT1, \
+                                MG_TX_SWINGCTRL_TX1LN0_PORT2, \
+                                MG_TX_SWINGCTRL_TX1LN1_PORT1)
+
+#define MG_TX_SWINGCTRL_TX2LN0_PORT1           0x1680C8
+#define MG_TX_SWINGCTRL_TX2LN1_PORT1           0x1684C8
+#define MG_TX_SWINGCTRL_TX2LN0_PORT2           0x1690C8
+#define MG_TX_SWINGCTRL_TX2LN1_PORT2           0x1694C8
+#define MG_TX_SWINGCTRL_TX2LN0_PORT3           0x16A0C8
+#define MG_TX_SWINGCTRL_TX2LN1_PORT3           0x16A4C8
+#define MG_TX_SWINGCTRL_TX2LN0_PORT4           0x16B0C8
+#define MG_TX_SWINGCTRL_TX2LN1_PORT4           0x16B4C8
+#define MG_TX2_SWINGCTRL(port, ln) \
+       MG_PHY_PORT_LN(port, ln, MG_TX_SWINGCTRL_TX2LN0_PORT1, \
+                                MG_TX_SWINGCTRL_TX2LN0_PORT2, \
+                                MG_TX_SWINGCTRL_TX2LN1_PORT1)
+#define   CRI_TXDEEMPH_OVERRIDE_17_12(x)               ((x) << 0)
+#define   CRI_TXDEEMPH_OVERRIDE_17_12_MASK             (0x3F << 0)
+
+#define MG_TX_DRVCTRL_TX1LN0_TXPORT1                   0x168144
+#define MG_TX_DRVCTRL_TX1LN1_TXPORT1                   0x168544
+#define MG_TX_DRVCTRL_TX1LN0_TXPORT2                   0x169144
+#define MG_TX_DRVCTRL_TX1LN1_TXPORT2                   0x169544
+#define MG_TX_DRVCTRL_TX1LN0_TXPORT3                   0x16A144
+#define MG_TX_DRVCTRL_TX1LN1_TXPORT3                   0x16A544
+#define MG_TX_DRVCTRL_TX1LN0_TXPORT4                   0x16B144
+#define MG_TX_DRVCTRL_TX1LN1_TXPORT4                   0x16B544
+#define MG_TX1_DRVCTRL(port, ln) \
+       MG_PHY_PORT_LN(port, ln, MG_TX_DRVCTRL_TX1LN0_TXPORT1, \
+                                MG_TX_DRVCTRL_TX1LN0_TXPORT2, \
+                                MG_TX_DRVCTRL_TX1LN1_TXPORT1)
+
+#define MG_TX_DRVCTRL_TX2LN0_PORT1                     0x1680C4
+#define MG_TX_DRVCTRL_TX2LN1_PORT1                     0x1684C4
+#define MG_TX_DRVCTRL_TX2LN0_PORT2                     0x1690C4
+#define MG_TX_DRVCTRL_TX2LN1_PORT2                     0x1694C4
+#define MG_TX_DRVCTRL_TX2LN0_PORT3                     0x16A0C4
+#define MG_TX_DRVCTRL_TX2LN1_PORT3                     0x16A4C4
+#define MG_TX_DRVCTRL_TX2LN0_PORT4                     0x16B0C4
+#define MG_TX_DRVCTRL_TX2LN1_PORT4                     0x16B4C4
+#define MG_TX2_DRVCTRL(port, ln) \
+       MG_PHY_PORT_LN(port, ln, MG_TX_DRVCTRL_TX2LN0_PORT1, \
+                                MG_TX_DRVCTRL_TX2LN0_PORT2, \
+                                MG_TX_DRVCTRL_TX2LN1_PORT1)
+#define   CRI_TXDEEMPH_OVERRIDE_11_6(x)                        ((x) << 24)
+#define   CRI_TXDEEMPH_OVERRIDE_11_6_MASK              (0x3F << 24)
+#define   CRI_TXDEEMPH_OVERRIDE_EN                     (1 << 22)
+#define   CRI_TXDEEMPH_OVERRIDE_5_0(x)                 ((x) << 16)
+#define   CRI_TXDEEMPH_OVERRIDE_5_0_MASK               (0x3F << 16)
+#define   CRI_LOADGEN_SEL(x)                           ((x) << 12)
+#define   CRI_LOADGEN_SEL_MASK                         (0x3 << 12)
+
+#define MG_CLKHUB_LN0_PORT1                    0x16839C
+#define MG_CLKHUB_LN1_PORT1                    0x16879C
+#define MG_CLKHUB_LN0_PORT2                    0x16939C
+#define MG_CLKHUB_LN1_PORT2                    0x16979C
+#define MG_CLKHUB_LN0_PORT3                    0x16A39C
+#define MG_CLKHUB_LN1_PORT3                    0x16A79C
+#define MG_CLKHUB_LN0_PORT4                    0x16B39C
+#define MG_CLKHUB_LN1_PORT4                    0x16B79C
+#define MG_CLKHUB(port, ln) \
+       MG_PHY_PORT_LN(port, ln, MG_CLKHUB_LN0_PORT1, \
+                                MG_CLKHUB_LN0_PORT2, \
+                                MG_CLKHUB_LN1_PORT1)
+#define   CFG_LOW_RATE_LKREN_EN                                (1 << 11)
+
+#define MG_TX_DCC_TX1LN0_PORT1                 0x168110
+#define MG_TX_DCC_TX1LN1_PORT1                 0x168510
+#define MG_TX_DCC_TX1LN0_PORT2                 0x169110
+#define MG_TX_DCC_TX1LN1_PORT2                 0x169510
+#define MG_TX_DCC_TX1LN0_PORT3                 0x16A110
+#define MG_TX_DCC_TX1LN1_PORT3                 0x16A510
+#define MG_TX_DCC_TX1LN0_PORT4                 0x16B110
+#define MG_TX_DCC_TX1LN1_PORT4                 0x16B510
+#define MG_TX1_DCC(port, ln) \
+       MG_PHY_PORT_LN(port, ln, MG_TX_DCC_TX1LN0_PORT1, \
+                                MG_TX_DCC_TX1LN0_PORT2, \
+                                MG_TX_DCC_TX1LN1_PORT1)
+#define MG_TX_DCC_TX2LN0_PORT1                 0x168090
+#define MG_TX_DCC_TX2LN1_PORT1                 0x168490
+#define MG_TX_DCC_TX2LN0_PORT2                 0x169090
+#define MG_TX_DCC_TX2LN1_PORT2                 0x169490
+#define MG_TX_DCC_TX2LN0_PORT3                 0x16A090
+#define MG_TX_DCC_TX2LN1_PORT3                 0x16A490
+#define MG_TX_DCC_TX2LN0_PORT4                 0x16B090
+#define MG_TX_DCC_TX2LN1_PORT4                 0x16B490
+#define MG_TX2_DCC(port, ln) \
+       MG_PHY_PORT_LN(port, ln, MG_TX_DCC_TX2LN0_PORT1, \
+                                MG_TX_DCC_TX2LN0_PORT2, \
+                                MG_TX_DCC_TX2LN1_PORT1)
+#define   CFG_AMI_CK_DIV_OVERRIDE_VAL(x)       ((x) << 25)
+#define   CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK     (0x3 << 25)
+#define   CFG_AMI_CK_DIV_OVERRIDE_EN           (1 << 24)
+
+#define MG_DP_MODE_LN0_ACU_PORT1                       0x1683A0
+#define MG_DP_MODE_LN1_ACU_PORT1                       0x1687A0
+#define MG_DP_MODE_LN0_ACU_PORT2                       0x1693A0
+#define MG_DP_MODE_LN1_ACU_PORT2                       0x1697A0
+#define MG_DP_MODE_LN0_ACU_PORT3                       0x16A3A0
+#define MG_DP_MODE_LN1_ACU_PORT3                       0x16A7A0
+#define MG_DP_MODE_LN0_ACU_PORT4                       0x16B3A0
+#define MG_DP_MODE_LN1_ACU_PORT4                       0x16B7A0
+#define MG_DP_MODE(port, ln)   \
+       MG_PHY_PORT_LN(port, ln, MG_DP_MODE_LN0_ACU_PORT1, \
+                                MG_DP_MODE_LN0_ACU_PORT2, \
+                                MG_DP_MODE_LN1_ACU_PORT1)
+#define   MG_DP_MODE_CFG_DP_X2_MODE                    (1 << 7)
+#define   MG_DP_MODE_CFG_DP_X1_MODE                    (1 << 6)
+#define   MG_DP_MODE_CFG_TR2PWR_GATING                 (1 << 5)
+#define   MG_DP_MODE_CFG_TRPWR_GATING                  (1 << 4)
+#define   MG_DP_MODE_CFG_CLNPWR_GATING                 (1 << 3)
+#define   MG_DP_MODE_CFG_DIGPWR_GATING                 (1 << 2)
+#define   MG_DP_MODE_CFG_GAONPWR_GATING                        (1 << 1)
+
+#define MG_MISC_SUS0_PORT1                             0x168814
+#define MG_MISC_SUS0_PORT2                             0x169814
+#define MG_MISC_SUS0_PORT3                             0x16A814
+#define MG_MISC_SUS0_PORT4                             0x16B814
+#define MG_MISC_SUS0(tc_port) \
+       _MMIO(_PORT(tc_port, MG_MISC_SUS0_PORT1, MG_MISC_SUS0_PORT2))
+#define   MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK     (3 << 14)
+#define   MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(x)       ((x) << 14)
+#define   MG_MISC_SUS0_CFG_TR2PWR_GATING               (1 << 12)
+#define   MG_MISC_SUS0_CFG_CL2PWR_GATING               (1 << 11)
+#define   MG_MISC_SUS0_CFG_GAONPWR_GATING              (1 << 10)
+#define   MG_MISC_SUS0_CFG_TRPWR_GATING                        (1 << 7)
+#define   MG_MISC_SUS0_CFG_CL1PWR_GATING               (1 << 6)
+#define   MG_MISC_SUS0_CFG_DGPWR_GATING                        (1 << 5)
 
 /* The spec defines this only for BXT PHY0, but let's assume that this
  * would exist for PHY1 too if it had a second channel.
@@ -3086,18 +3084,9 @@ enum i915_power_well_id {
 /*
  * GPIO regs
  */
-#define GPIOA                  _MMIO(0x5010)
-#define GPIOB                  _MMIO(0x5014)
-#define GPIOC                  _MMIO(0x5018)
-#define GPIOD                  _MMIO(0x501c)
-#define GPIOE                  _MMIO(0x5020)
-#define GPIOF                  _MMIO(0x5024)
-#define GPIOG                  _MMIO(0x5028)
-#define GPIOH                  _MMIO(0x502c)
-#define GPIOJ                  _MMIO(0x5034)
-#define GPIOK                  _MMIO(0x5038)
-#define GPIOL                  _MMIO(0x503C)
-#define GPIOM                  _MMIO(0x5040)
+#define GPIO(gpio)             _MMIO(dev_priv->gpio_mmio_base + 0x5010 + \
+                                     4 * (gpio))
+
 # define GPIO_CLOCK_DIR_MASK           (1 << 0)
 # define GPIO_CLOCK_DIR_IN             (0 << 1)
 # define GPIO_CLOCK_DIR_OUT            (1 << 1)
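
The removed GPIOA..GPIOM table collapses into the arithmetic of the new GPIO() macro. The spot checks below are a sketch; gpio_mmio_base is assumed to be 0 on the CPU side and PCH_DISPLAY_BASE (0xc0000, added later in this patch) on the PCH side:

/* GPIO(0)  -> base + 0x5010 (old GPIOA; with the PCH base, 0xc5010 = old PCH_GPIOA)
 * GPIO(9)  -> base + 0x5034 (old GPIOJ; index 8 / 0x5030 is simply unused)
 * GPIO(12) -> base + 0x5040 (old GPIOM)
 */
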
@@ -5476,6 +5465,7 @@ enum {
 #define   DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL   (1 << 14)
 #define   DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL    (1 << 13)
 #define   DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL   (1 << 12)
+#define   DP_AUX_CH_CTL_TBT_IO                 (1 << 11)
 #define   DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL_MASK (0x1f << 5)
 #define   DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(c) (((c) - 1) << 5)
 #define   DP_AUX_CH_CTL_SYNC_PULSE_SKL(c)   ((c) - 1)
@@ -6527,7 +6517,7 @@ enum {
 #define   PLANE_CTL_YUV422_UYVY                        (1 << 16)
 #define   PLANE_CTL_YUV422_YVYU                        (2 << 16)
 #define   PLANE_CTL_YUV422_VYUY                        (3 << 16)
-#define   PLANE_CTL_DECOMPRESSION_ENABLE       (1 << 15)
+#define   PLANE_CTL_RENDER_DECOMPRESSION_ENABLE        (1 << 15)
 #define   PLANE_CTL_TRICKLE_FEED_DISABLE       (1 << 14)
 #define   PLANE_CTL_PLANE_GAMMA_DISABLE                (1 << 13) /* Pre-GLK */
 #define   PLANE_CTL_TILED_MASK                 (0x7 << 10)
@@ -7207,6 +7197,7 @@ enum {
 #define  GEN11_TC3_HOTPLUG                     (1 << 18)
 #define  GEN11_TC2_HOTPLUG                     (1 << 17)
 #define  GEN11_TC1_HOTPLUG                     (1 << 16)
+#define  GEN11_TC_HOTPLUG(tc_port)             (1 << ((tc_port) + 16))
 #define  GEN11_DE_TC_HOTPLUG_MASK              (GEN11_TC4_HOTPLUG | \
                                                 GEN11_TC3_HOTPLUG | \
                                                 GEN11_TC2_HOTPLUG | \
@@ -7215,6 +7206,7 @@ enum {
 #define  GEN11_TBT3_HOTPLUG                    (1 << 2)
 #define  GEN11_TBT2_HOTPLUG                    (1 << 1)
 #define  GEN11_TBT1_HOTPLUG                    (1 << 0)
+#define  GEN11_TBT_HOTPLUG(tc_port)            (1 << (tc_port))
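
The two parameterized hotplug macros can be checked against the per-port bits they generalize:

/* Spot checks:
 *   GEN11_TC_HOTPLUG(0)  == (1 << 16) == GEN11_TC1_HOTPLUG
 *   GEN11_TC_HOTPLUG(3)  == (1 << 19) == GEN11_TC4_HOTPLUG
 *   GEN11_TBT_HOTPLUG(0) == (1 << 0)  == GEN11_TBT1_HOTPLUG
 */
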
 #define  GEN11_DE_TBT_HOTPLUG_MASK             (GEN11_TBT4_HOTPLUG | \
                                                 GEN11_TBT3_HOTPLUG | \
                                                 GEN11_TBT2_HOTPLUG | \
@@ -7490,6 +7482,8 @@ enum {
 
 /* PCH */
 
+#define PCH_DISPLAY_BASE       0xc0000u
+
 /* south display engine interrupt: IBX */
 #define SDE_AUDIO_POWER_D      (1 << 27)
 #define SDE_AUDIO_POWER_C      (1 << 26)
@@ -7587,6 +7581,8 @@ enum {
 #define SDE_GMBUS_ICP                  (1 << 23)
 #define SDE_DDIB_HOTPLUG_ICP           (1 << 17)
 #define SDE_DDIA_HOTPLUG_ICP           (1 << 16)
+#define SDE_TC_HOTPLUG_ICP(tc_port)    (1 << ((tc_port) + 24))
+#define SDE_DDI_HOTPLUG_ICP(port)      (1 << ((port) + 16))
 #define SDE_DDI_MASK_ICP               (SDE_DDIB_HOTPLUG_ICP | \
                                         SDE_DDIA_HOTPLUG_ICP)
 #define SDE_TC_MASK_ICP                        (SDE_TC4_HOTPLUG_ICP |  \
@@ -7782,20 +7778,6 @@ enum {
 #define   ICP_TC_HPD_LONG_DETECT(tc_port)      (2 << (tc_port) * 4)
 #define   ICP_TC_HPD_SHORT_DETECT(tc_port)     (1 << (tc_port) * 4)
 
-#define PCH_GPIOA               _MMIO(0xc5010)
-#define PCH_GPIOB               _MMIO(0xc5014)
-#define PCH_GPIOC               _MMIO(0xc5018)
-#define PCH_GPIOD               _MMIO(0xc501c)
-#define PCH_GPIOE               _MMIO(0xc5020)
-#define PCH_GPIOF               _MMIO(0xc5024)
-
-#define PCH_GMBUS0             _MMIO(0xc5100)
-#define PCH_GMBUS1             _MMIO(0xc5104)
-#define PCH_GMBUS2             _MMIO(0xc5108)
-#define PCH_GMBUS3             _MMIO(0xc510c)
-#define PCH_GMBUS4             _MMIO(0xc5110)
-#define PCH_GMBUS5             _MMIO(0xc5120)
-
 #define _PCH_DPLL_A              0xc6014
 #define _PCH_DPLL_B              0xc6018
 #define PCH_DPLL(pll) _MMIO((pll) == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
@@ -8498,8 +8480,10 @@ enum {
 #define  GEN6_PM_RP_DOWN_THRESHOLD             (1 << 4)
 #define  GEN6_PM_RP_UP_EI_EXPIRED              (1 << 2)
 #define  GEN6_PM_RP_DOWN_EI_EXPIRED            (1 << 1)
-#define  GEN6_PM_RPS_EVENTS                    (GEN6_PM_RP_UP_THRESHOLD | \
-                                                GEN6_PM_RP_DOWN_THRESHOLD | \
+#define  GEN6_PM_RPS_EVENTS                    (GEN6_PM_RP_UP_EI_EXPIRED   | \
+                                                GEN6_PM_RP_UP_THRESHOLD    | \
+                                                GEN6_PM_RP_DOWN_EI_EXPIRED | \
+                                                GEN6_PM_RP_DOWN_THRESHOLD  | \
                                                 GEN6_PM_RP_DOWN_TIMEOUT)
 
 #define GEN7_GT_SCRATCH(i)                     _MMIO(0x4F100 + (i) * 4)
@@ -8827,46 +8811,78 @@ enum {
 #define HSW_AUD_CHICKENBIT                     _MMIO(0x65f10)
 #define   SKL_AUD_CODEC_WAKE_SIGNAL            (1 << 15)
 
-/* HSW Power Wells */
-#define _HSW_PWR_WELL_CTL1                     0x45400
-#define _HSW_PWR_WELL_CTL2                     0x45404
-#define _HSW_PWR_WELL_CTL3                     0x45408
-#define _HSW_PWR_WELL_CTL4                     0x4540C
-
-#define _ICL_PWR_WELL_CTL_AUX1                 0x45440
-#define _ICL_PWR_WELL_CTL_AUX2                 0x45444
-#define _ICL_PWR_WELL_CTL_AUX4                 0x4544C
-
-#define _ICL_PWR_WELL_CTL_DDI1                 0x45450
-#define _ICL_PWR_WELL_CTL_DDI2                 0x45454
-#define _ICL_PWR_WELL_CTL_DDI4                 0x4545C
-
 /*
- * Each power well control register contains up to 16 (request, status) HW
- * flag tuples. The register index and HW flag shift is determined by the
- * power well ID (see i915_power_well_id). There are 4 possible sources of
- * power well requests each source having its own set of control registers:
- * BIOS, DRIVER, KVMR, DEBUG.
+ * HSW - ICL power wells
+ *
+ * Platforms have up to 3 power well control register sets, each set
+ * controlling up to 16 power wells via a request/status HW flag tuple:
+ * - main (HSW_PWR_WELL_CTL[1-4])
+ * - AUX  (ICL_PWR_WELL_CTL_AUX[1-4])
+ * - DDI  (ICL_PWR_WELL_CTL_DDI[1-4])
+ * Each control register set consists of up to 4 registers used by different
+ * sources that can request a power well to be enabled:
+ * - BIOS   (HSW_PWR_WELL_CTL1/ICL_PWR_WELL_CTL_AUX1/ICL_PWR_WELL_CTL_DDI1)
+ * - DRIVER (HSW_PWR_WELL_CTL2/ICL_PWR_WELL_CTL_AUX2/ICL_PWR_WELL_CTL_DDI2)
+ * - KVMR   (HSW_PWR_WELL_CTL3)   (only in the main register set)
+ * - DEBUG  (HSW_PWR_WELL_CTL4/ICL_PWR_WELL_CTL_AUX4/ICL_PWR_WELL_CTL_DDI4)
  */
-#define _HSW_PW_REG_IDX(pw)                    ((pw) >> 4)
-#define _HSW_PW_SHIFT(pw)                      (((pw) & 0xf) * 2)
-#define HSW_PWR_WELL_CTL_BIOS(pw)      _MMIO(_PICK(_HSW_PW_REG_IDX(pw),       \
-                                                   _HSW_PWR_WELL_CTL1,        \
-                                                   _ICL_PWR_WELL_CTL_AUX1,    \
-                                                   _ICL_PWR_WELL_CTL_DDI1))
-#define HSW_PWR_WELL_CTL_DRIVER(pw)    _MMIO(_PICK(_HSW_PW_REG_IDX(pw),       \
-                                                   _HSW_PWR_WELL_CTL2,        \
-                                                   _ICL_PWR_WELL_CTL_AUX2,    \
-                                                   _ICL_PWR_WELL_CTL_DDI2))
-/* KVMR doesn't have a reg for AUX or DDI power well control */
-#define HSW_PWR_WELL_CTL_KVMR          _MMIO(_HSW_PWR_WELL_CTL3)
-#define HSW_PWR_WELL_CTL_DEBUG(pw)     _MMIO(_PICK(_HSW_PW_REG_IDX(pw),       \
-                                                   _HSW_PWR_WELL_CTL4,        \
-                                                   _ICL_PWR_WELL_CTL_AUX4,    \
-                                                   _ICL_PWR_WELL_CTL_DDI4))
-
-#define   HSW_PWR_WELL_CTL_REQ(pw)             (1 << (_HSW_PW_SHIFT(pw) + 1))
-#define   HSW_PWR_WELL_CTL_STATE(pw)           (1 << _HSW_PW_SHIFT(pw))
+#define HSW_PWR_WELL_CTL1                      _MMIO(0x45400)
+#define HSW_PWR_WELL_CTL2                      _MMIO(0x45404)
+#define HSW_PWR_WELL_CTL3                      _MMIO(0x45408)
+#define HSW_PWR_WELL_CTL4                      _MMIO(0x4540C)
+#define   HSW_PWR_WELL_CTL_REQ(pw_idx)         (0x2 << ((pw_idx) * 2))
+#define   HSW_PWR_WELL_CTL_STATE(pw_idx)       (0x1 << ((pw_idx) * 2))
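
A minimal sketch of how one (request, status) tuple is driven, assuming the driver's usual I915_READ/I915_WRITE and wait_for() helpers; this is an illustration, not code from the patch:

/* Enable the power well at index pw_idx from the DRIVER source. */
u32 val = I915_READ(HSW_PWR_WELL_CTL2);

I915_WRITE(HSW_PWR_WELL_CTL2, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
/* HW acks in the adjacent status bit, e.g. bits 31/30 for pw_idx 15. */
if (wait_for(I915_READ(HSW_PWR_WELL_CTL2) &
             HSW_PWR_WELL_CTL_STATE(pw_idx), 1))
        DRM_ERROR("power well %d enable timeout\n", pw_idx);
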
+
+/* HSW/BDW power well */
+#define   HSW_PW_CTL_IDX_GLOBAL                        15
+
+/* SKL/BXT/GLK/CNL power wells */
+#define   SKL_PW_CTL_IDX_PW_2                  15
+#define   SKL_PW_CTL_IDX_PW_1                  14
+#define   CNL_PW_CTL_IDX_AUX_F                 12
+#define   CNL_PW_CTL_IDX_AUX_D                 11
+#define   GLK_PW_CTL_IDX_AUX_C                 10
+#define   GLK_PW_CTL_IDX_AUX_B                 9
+#define   GLK_PW_CTL_IDX_AUX_A                 8
+#define   CNL_PW_CTL_IDX_DDI_F                 6
+#define   SKL_PW_CTL_IDX_DDI_D                 4
+#define   SKL_PW_CTL_IDX_DDI_C                 3
+#define   SKL_PW_CTL_IDX_DDI_B                 2
+#define   SKL_PW_CTL_IDX_DDI_A_E               1
+#define   GLK_PW_CTL_IDX_DDI_A                 1
+#define   SKL_PW_CTL_IDX_MISC_IO               0
+
+/* ICL - power wells */
+#define   ICL_PW_CTL_IDX_PW_4                  3
+#define   ICL_PW_CTL_IDX_PW_3                  2
+#define   ICL_PW_CTL_IDX_PW_2                  1
+#define   ICL_PW_CTL_IDX_PW_1                  0
+
+#define ICL_PWR_WELL_CTL_AUX1                  _MMIO(0x45440)
+#define ICL_PWR_WELL_CTL_AUX2                  _MMIO(0x45444)
+#define ICL_PWR_WELL_CTL_AUX4                  _MMIO(0x4544C)
+#define   ICL_PW_CTL_IDX_AUX_TBT4              11
+#define   ICL_PW_CTL_IDX_AUX_TBT3              10
+#define   ICL_PW_CTL_IDX_AUX_TBT2              9
+#define   ICL_PW_CTL_IDX_AUX_TBT1              8
+#define   ICL_PW_CTL_IDX_AUX_F                 5
+#define   ICL_PW_CTL_IDX_AUX_E                 4
+#define   ICL_PW_CTL_IDX_AUX_D                 3
+#define   ICL_PW_CTL_IDX_AUX_C                 2
+#define   ICL_PW_CTL_IDX_AUX_B                 1
+#define   ICL_PW_CTL_IDX_AUX_A                 0
+
+#define ICL_PWR_WELL_CTL_DDI1                  _MMIO(0x45450)
+#define ICL_PWR_WELL_CTL_DDI2                  _MMIO(0x45454)
+#define ICL_PWR_WELL_CTL_DDI4                  _MMIO(0x4545C)
+#define   ICL_PW_CTL_IDX_DDI_F                 5
+#define   ICL_PW_CTL_IDX_DDI_E                 4
+#define   ICL_PW_CTL_IDX_DDI_D                 3
+#define   ICL_PW_CTL_IDX_DDI_C                 2
+#define   ICL_PW_CTL_IDX_DDI_B                 1
+#define   ICL_PW_CTL_IDX_DDI_A                 0
+
+/* HSW - power well misc debug registers */
 #define HSW_PWR_WELL_CTL5                      _MMIO(0x45410)
 #define   HSW_PWR_WELL_ENABLE_SINGLE_STEP      (1 << 31)
 #define   HSW_PWR_WELL_PWR_GATE_OVERRIDE       (1 << 20)
@@ -8878,22 +8894,32 @@ enum skl_power_gate {
        SKL_PG0,
        SKL_PG1,
        SKL_PG2,
+       ICL_PG3,
+       ICL_PG4,
 };
 
 #define SKL_FUSE_STATUS                                _MMIO(0x42000)
 #define  SKL_FUSE_DOWNLOAD_STATUS              (1 << 31)
-/* PG0 (HW control->no power well ID), PG1..PG2 (SKL_DISP_PW1..SKL_DISP_PW2) */
-#define  SKL_PW_TO_PG(pw)                      ((pw) - SKL_DISP_PW_1 + SKL_PG1)
-/* PG0 (HW control->no power well ID), PG1..PG4 (ICL_DISP_PW1..ICL_DISP_PW4) */
-#define  ICL_PW_TO_PG(pw)                      ((pw) - ICL_DISP_PW_1 + SKL_PG1)
+/*
+ * PG0 is HW controlled, so doesn't have a corresponding power well control knob
+ * SKL_DISP_PW1_IDX..SKL_DISP_PW2_IDX -> PG1..PG2
+ */
+#define  SKL_PW_CTL_IDX_TO_PG(pw_idx)          \
+       ((pw_idx) - SKL_PW_CTL_IDX_PW_1 + SKL_PG1)
+/*
+ * PG0 is HW controlled, so doesn't have a corresponding power well control knob
+ * ICL_DISP_PW1_IDX..ICL_DISP_PW4_IDX -> PG1..PG4
+ */
+#define  ICL_PW_CTL_IDX_TO_PG(pw_idx)          \
+       ((pw_idx) - ICL_PW_CTL_IDX_PW_1 + SKL_PG1)
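
Worked examples of the index-to-power-gate mapping, using the enum and indices defined above:

/* SKL_PW_CTL_IDX_TO_PG(SKL_PW_CTL_IDX_PW_2) == 15 - 14 + SKL_PG1 == SKL_PG2
 * ICL_PW_CTL_IDX_TO_PG(ICL_PW_CTL_IDX_PW_4) ==  3 -  0 + SKL_PG1 == ICL_PG4
 */
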
 #define  SKL_FUSE_PG_DIST_STATUS(pg)           (1 << (27 - (pg)))
 
-#define _CNL_AUX_REG_IDX(pw)           ((pw) - 9)
+#define _CNL_AUX_REG_IDX(pw_idx)       ((pw_idx) - GLK_PW_CTL_IDX_AUX_B)
 #define _CNL_AUX_ANAOVRD1_B            0x162250
 #define _CNL_AUX_ANAOVRD1_C            0x162210
 #define _CNL_AUX_ANAOVRD1_D            0x1622D0
 #define _CNL_AUX_ANAOVRD1_F            0x162A90
-#define CNL_AUX_ANAOVRD1(pw)           _MMIO(_PICK(_CNL_AUX_REG_IDX(pw), \
+#define CNL_AUX_ANAOVRD1(pw_idx)       _MMIO(_PICK(_CNL_AUX_REG_IDX(pw_idx), \
                                                    _CNL_AUX_ANAOVRD1_B, \
                                                    _CNL_AUX_ANAOVRD1_C, \
                                                    _CNL_AUX_ANAOVRD1_D, \
@@ -9367,9 +9393,13 @@ enum skl_power_gate {
 #define   MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK       (0x1 << 16)
 #define   MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(x)       ((x) << 14)
 #define   MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK     (0x3 << 14)
-#define   MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO(x)           ((x) << 12)
 #define   MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK         (0x3 << 12)
+#define   MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2            (0 << 12)
+#define   MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3            (1 << 12)
+#define   MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5            (2 << 12)
+#define   MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7            (3 << 12)
 #define   MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(x)           ((x) << 8)
+#define   MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT                8
 #define   MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK         (0xf << 8)
 #define MG_CLKTOP2_HSCLKCTL(port) _MMIO_PORT((port) - PORT_C, \
                                             _MG_CLKTOP2_HSCLKCTL_PORT1, \
@@ -9380,7 +9410,10 @@ enum skl_power_gate {
 #define _MG_PLL_DIV0_PORT3                             0x16AA00
 #define _MG_PLL_DIV0_PORT4                             0x16BA00
 #define   MG_PLL_DIV0_FRACNEN_H                                (1 << 30)
+#define   MG_PLL_DIV0_FBDIV_FRAC_MASK                  (0x3fffff << 8)
+#define   MG_PLL_DIV0_FBDIV_FRAC_SHIFT                 8
 #define   MG_PLL_DIV0_FBDIV_FRAC(x)                    ((x) << 8)
+#define   MG_PLL_DIV0_FBDIV_INT_MASK                   (0xff << 0)
 #define   MG_PLL_DIV0_FBDIV_INT(x)                     ((x) << 0)
 #define MG_PLL_DIV0(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_DIV0_PORT1, \
                                     _MG_PLL_DIV0_PORT2)
@@ -9395,6 +9428,7 @@ enum skl_power_gate {
 #define   MG_PLL_DIV1_DITHER_DIV_4                     (2 << 12)
 #define   MG_PLL_DIV1_DITHER_DIV_8                     (3 << 12)
 #define   MG_PLL_DIV1_NDIVRATIO(x)                     ((x) << 4)
+#define   MG_PLL_DIV1_FBPREDIV_MASK                    (0xf << 0)
 #define   MG_PLL_DIV1_FBPREDIV(x)                      ((x) << 0)
 #define MG_PLL_DIV1(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_DIV1_PORT1, \
                                     _MG_PLL_DIV1_PORT2)
@@ -10347,8 +10381,8 @@ enum skl_power_gate {
 #define  ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN      (1 << 23)
 
 /* Icelake Display Stream Compression Registers */
-#define DSCA_PICTURE_PARAMETER_SET_0           0x6B200
-#define DSCC_PICTURE_PARAMETER_SET_0           0x6BA00
+#define DSCA_PICTURE_PARAMETER_SET_0           _MMIO(0x6B200)
+#define DSCC_PICTURE_PARAMETER_SET_0           _MMIO(0x6BA00)
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB   0x78270
 #define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB   0x78370
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC   0x78470
@@ -10368,8 +10402,8 @@ enum skl_power_gate {
 #define  DSC_VER_MIN_SHIFT             4
 #define  DSC_VER_MAJ                   (0x1 << 0)
 
-#define DSCA_PICTURE_PARAMETER_SET_1           0x6B204
-#define DSCC_PICTURE_PARAMETER_SET_1           0x6BA04
+#define DSCA_PICTURE_PARAMETER_SET_1           _MMIO(0x6B204)
+#define DSCC_PICTURE_PARAMETER_SET_1           _MMIO(0x6BA04)
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB   0x78274
 #define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB   0x78374
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC   0x78474
@@ -10382,8 +10416,8 @@ enum skl_power_gate {
                                                           _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC)
 #define  DSC_BPP(bpp)                          ((bpp) << 0)
 
-#define DSCA_PICTURE_PARAMETER_SET_2           0x6B208
-#define DSCC_PICTURE_PARAMETER_SET_2           0x6BA08
+#define DSCA_PICTURE_PARAMETER_SET_2           _MMIO(0x6B208)
+#define DSCC_PICTURE_PARAMETER_SET_2           _MMIO(0x6BA08)
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB   0x78278
 #define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB   0x78378
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC   0x78478
@@ -10397,8 +10431,8 @@ enum skl_power_gate {
 #define  DSC_PIC_WIDTH(pic_width)      ((pic_width) << 16)
 #define  DSC_PIC_HEIGHT(pic_height)    ((pic_height) << 0)
 
-#define DSCA_PICTURE_PARAMETER_SET_3           0x6B20C
-#define DSCC_PICTURE_PARAMETER_SET_3           0x6BA0C
+#define DSCA_PICTURE_PARAMETER_SET_3           _MMIO(0x6B20C)
+#define DSCC_PICTURE_PARAMETER_SET_3           _MMIO(0x6BA0C)
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB   0x7827C
 #define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB   0x7837C
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC   0x7847C
@@ -10412,8 +10446,8 @@ enum skl_power_gate {
 #define  DSC_SLICE_WIDTH(slice_width)   ((slice_width) << 16)
 #define  DSC_SLICE_HEIGHT(slice_height) ((slice_height) << 0)
 
-#define DSCA_PICTURE_PARAMETER_SET_4           0x6B210
-#define DSCC_PICTURE_PARAMETER_SET_4           0x6BA10
+#define DSCA_PICTURE_PARAMETER_SET_4           _MMIO(0x6B210)
+#define DSCC_PICTURE_PARAMETER_SET_4           _MMIO(0x6BA10)
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB   0x78280
 #define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB   0x78380
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC   0x78480
@@ -10422,13 +10456,13 @@ enum skl_power_gate {
                                                           _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \
                                                           _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC)
 #define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
-                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \
                                                           _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC)
 #define  DSC_INITIAL_DEC_DELAY(dec_delay)       ((dec_delay) << 16)
 #define  DSC_INITIAL_XMIT_DELAY(xmit_delay)     ((xmit_delay) << 0)
 
-#define DSCA_PICTURE_PARAMETER_SET_5           0x6B214
-#define DSCC_PICTURE_PARAMETER_SET_5           0x6BA14
+#define DSCA_PICTURE_PARAMETER_SET_5           _MMIO(0x6B214)
+#define DSCC_PICTURE_PARAMETER_SET_5           _MMIO(0x6BA14)
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB   0x78284
 #define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB   0x78384
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC   0x78484
@@ -10437,13 +10471,13 @@ enum skl_power_gate {
                                                           _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \
                                                           _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC)
 #define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
-                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \
                                                           _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC)
-#define  DSC_SCALE_DEC_INTINT(scale_dec)       ((scale_dec) << 16)
+#define  DSC_SCALE_DEC_INT(scale_dec)  ((scale_dec) << 16)
 #define  DSC_SCALE_INC_INT(scale_inc)          ((scale_inc) << 0)
 
-#define DSCA_PICTURE_PARAMETER_SET_6           0x6B218
-#define DSCC_PICTURE_PARAMETER_SET_6           0x6BA18
+#define DSCA_PICTURE_PARAMETER_SET_6           _MMIO(0x6B218)
+#define DSCC_PICTURE_PARAMETER_SET_6           _MMIO(0x6BA18)
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB   0x78288
 #define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB   0x78388
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC   0x78488
@@ -10454,13 +10488,13 @@ enum skl_power_gate {
 #define ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
                                                           _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB, \
                                                           _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC)
-#define  DSC_FLATNESS_MAX_QP(max_qp)           (qp << 24)
-#define  DSC_FLATNESS_MIN_QP(min_qp)           (qp << 16)
+#define  DSC_FLATNESS_MAX_QP(max_qp)           ((max_qp) << 24)
+#define  DSC_FLATNESS_MIN_QP(min_qp)           ((min_qp) << 16)
 #define  DSC_FIRST_LINE_BPG_OFFSET(offset)     ((offset) << 8)
 #define  DSC_INITIAL_SCALE_VALUE(value)                ((value) << 0)
 
-#define DSCA_PICTURE_PARAMETER_SET_7           0x6B21C
-#define DSCC_PICTURE_PARAMETER_SET_7           0x6BA1C
+#define DSCA_PICTURE_PARAMETER_SET_7           _MMIO(0x6B21C)
+#define DSCC_PICTURE_PARAMETER_SET_7           _MMIO(0x6BA1C)
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB   0x7828C
 #define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB   0x7838C
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC   0x7848C
@@ -10474,8 +10508,8 @@ enum skl_power_gate {
 #define  DSC_NFL_BPG_OFFSET(bpg_offset)                ((bpg_offset) << 16)
 #define  DSC_SLICE_BPG_OFFSET(bpg_offset)      ((bpg_offset) << 0)
 
-#define DSCA_PICTURE_PARAMETER_SET_8           0x6B220
-#define DSCC_PICTURE_PARAMETER_SET_8           0x6BA20
+#define DSCA_PICTURE_PARAMETER_SET_8           _MMIO(0x6B220)
+#define DSCC_PICTURE_PARAMETER_SET_8           _MMIO(0x6BA20)
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB   0x78290
 #define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB   0x78390
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC   0x78490
@@ -10489,8 +10523,8 @@ enum skl_power_gate {
 #define  DSC_INITIAL_OFFSET(initial_offset)            ((initial_offset) << 16)
 #define  DSC_FINAL_OFFSET(final_offset)                        ((final_offset) << 0)
 
-#define DSCA_PICTURE_PARAMETER_SET_9           0x6B224
-#define DSCC_PICTURE_PARAMETER_SET_9           0x6BA24
+#define DSCA_PICTURE_PARAMETER_SET_9           _MMIO(0x6B224)
+#define DSCC_PICTURE_PARAMETER_SET_9           _MMIO(0x6BA24)
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB   0x78294
 #define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB   0x78394
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC   0x78494
@@ -10504,8 +10538,8 @@ enum skl_power_gate {
 #define  DSC_RC_EDGE_FACTOR(rc_edge_fact)      ((rc_edge_fact) << 16)
 #define  DSC_RC_MODEL_SIZE(rc_model_size)      ((rc_model_size) << 0)
 
-#define DSCA_PICTURE_PARAMETER_SET_10          0x6B228
-#define DSCC_PICTURE_PARAMETER_SET_10          0x6BA28
+#define DSCA_PICTURE_PARAMETER_SET_10          _MMIO(0x6B228)
+#define DSCC_PICTURE_PARAMETER_SET_10          _MMIO(0x6BA28)
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB  0x78298
 #define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB  0x78398
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC  0x78498
@@ -10521,8 +10555,8 @@ enum skl_power_gate {
 #define  DSC_RC_QUANT_INC_LIMIT1(lim)                  ((lim) << 8)
 #define  DSC_RC_QUANT_INC_LIMIT0(lim)                  ((lim) << 0)
 
-#define DSCA_PICTURE_PARAMETER_SET_11          0x6B22C
-#define DSCC_PICTURE_PARAMETER_SET_11          0x6BA2C
+#define DSCA_PICTURE_PARAMETER_SET_11          _MMIO(0x6B22C)
+#define DSCC_PICTURE_PARAMETER_SET_11          _MMIO(0x6BA2C)
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB  0x7829C
 #define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB  0x7839C
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC  0x7849C
@@ -10534,8 +10568,8 @@ enum skl_power_gate {
                                                           _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB, \
                                                           _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC)
 
-#define DSCA_PICTURE_PARAMETER_SET_12          0x6B260
-#define DSCC_PICTURE_PARAMETER_SET_12          0x6BA60
+#define DSCA_PICTURE_PARAMETER_SET_12          _MMIO(0x6B260)
+#define DSCC_PICTURE_PARAMETER_SET_12          _MMIO(0x6BA60)
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB  0x782A0
 #define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB  0x783A0
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC  0x784A0
@@ -10547,8 +10581,8 @@ enum skl_power_gate {
                                                           _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB, \
                                                           _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC)
 
-#define DSCA_PICTURE_PARAMETER_SET_13          0x6B264
-#define DSCC_PICTURE_PARAMETER_SET_13          0x6BA64
+#define DSCA_PICTURE_PARAMETER_SET_13          _MMIO(0x6B264)
+#define DSCC_PICTURE_PARAMETER_SET_13          _MMIO(0x6BA64)
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB  0x782A4
 #define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB  0x783A4
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC  0x784A4
@@ -10560,8 +10594,8 @@ enum skl_power_gate {
                                                           _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB, \
                                                           _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC)
 
-#define DSCA_PICTURE_PARAMETER_SET_14          0x6B268
-#define DSCC_PICTURE_PARAMETER_SET_14          0x6BA68
+#define DSCA_PICTURE_PARAMETER_SET_14          _MMIO(0x6B268)
+#define DSCC_PICTURE_PARAMETER_SET_14          _MMIO(0x6BA68)
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB  0x782A8
 #define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB  0x783A8
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC  0x784A8
@@ -10573,8 +10607,8 @@ enum skl_power_gate {
                                                           _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB, \
                                                           _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC)
 
-#define DSCA_PICTURE_PARAMETER_SET_15          0x6B26C
-#define DSCC_PICTURE_PARAMETER_SET_15          0x6BA6C
+#define DSCA_PICTURE_PARAMETER_SET_15          _MMIO(0x6B26C)
+#define DSCC_PICTURE_PARAMETER_SET_15          _MMIO(0x6BA6C)
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB  0x782AC
 #define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB  0x783AC
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC  0x784AC
@@ -10586,8 +10620,8 @@ enum skl_power_gate {
                                                           _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB, \
                                                           _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC)
 
-#define DSCA_PICTURE_PARAMETER_SET_16          0x6B270
-#define DSCC_PICTURE_PARAMETER_SET_16          0x6BA70
+#define DSCA_PICTURE_PARAMETER_SET_16          _MMIO(0x6B270)
+#define DSCC_PICTURE_PARAMETER_SET_16          _MMIO(0x6BA70)
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB  0x782B0
 #define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB  0x783B0
 #define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC  0x784B0
@@ -10599,7 +10633,7 @@ enum skl_power_gate {
                                                           _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \
                                                           _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC)
 #define  DSC_SLICE_PER_LINE(slice_per_line)            ((slice_per_line) << 16)
-#define  DSC_SLICE_CHUNK_SIZE(slice_chunk_aize)                (slice_chunk_size << 0)
+#define  DSC_SLICE_CHUNK_SIZE(slice_chunk_size)                ((slice_chunk_size) << 0)
 
 /* Icelake Rate Control Buffer Threshold Registers */
 #define DSCA_RC_BUF_THRESH_0                   _MMIO(0x6B230)
@@ -10652,4 +10686,17 @@ enum skl_power_gate {
                                                _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \
                                                _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC)
 
+#define PORT_TX_DFLEXDPSP                      _MMIO(0x1638A0)
+#define   TC_LIVE_STATE_TBT(tc_port)           (1 << ((tc_port) * 8 + 6))
+#define   TC_LIVE_STATE_TC(tc_port)            (1 << ((tc_port) * 8 + 5))
+#define   DP_LANE_ASSIGNMENT_SHIFT(tc_port)    ((tc_port) * 8)
+#define   DP_LANE_ASSIGNMENT_MASK(tc_port)     (0xf << ((tc_port) * 8))
+#define   DP_LANE_ASSIGNMENT(tc_port, x)       ((x) << ((tc_port) * 8))
+
+#define PORT_TX_DFLEXDPPMS                             _MMIO(0x163890)
+#define   DP_PHY_MODE_STATUS_COMPLETED(tc_port)                (1 << (tc_port))
+
+#define PORT_TX_DFLEXDPCSSS                            _MMIO(0x163894)
+#define   DP_PHY_MODE_STATUS_NOT_SAFE(tc_port)         (1 << (tc_port))
+
 #endif /* _I915_REG_H_ */
index 5c2c93cbab12f8ebff29507a00953a24a9a877c6..09ed48833b5496a9c5b6501ed128cf4bfd504167 100644
@@ -527,7 +527,7 @@ void __i915_request_submit(struct i915_request *request)
 
        seqno = timeline_get_seqno(&engine->timeline);
        GEM_BUG_ON(!seqno);
-       GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));
+       GEM_BUG_ON(intel_engine_signaled(engine, seqno));
 
        /* We may be recursing from the signal callback of another i915 fence */
        spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
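
The intel_engine_signaled()/intel_engine_has_started()/intel_engine_has_completed() helpers used here are introduced elsewhere in this series (in intel_ringbuffer.h, not shown in this diff); judging from the open-coded expressions they replace, they are presumably thin wrappers of roughly this shape:

/* Presumed shape of the new helpers -- a sketch, not from this hunk. */
static inline bool
intel_engine_signaled(struct intel_engine_cs *engine, u32 seqno)
{
        return i915_seqno_passed(intel_engine_get_seqno(engine), seqno);
}

static inline bool
intel_engine_has_started(struct intel_engine_cs *engine, u32 seqno)
{
        return intel_engine_signaled(engine, seqno - 1);
}

static inline bool
intel_engine_has_completed(struct intel_engine_cs *engine, u32 seqno)
{
        return intel_engine_signaled(engine, seqno);
}
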
@@ -579,8 +579,7 @@ void __i915_request_unsubmit(struct i915_request *request)
         */
        GEM_BUG_ON(!request->global_seqno);
        GEM_BUG_ON(request->global_seqno != engine->timeline.seqno);
-       GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine),
-                                    request->global_seqno));
+       GEM_BUG_ON(intel_engine_has_completed(engine, request->global_seqno));
        engine->timeline.seqno--;
 
        /* We may be recursing from the signal callback of another i915 fence */
@@ -1205,7 +1204,7 @@ static bool __i915_spin_request(const struct i915_request *rq,
         * it is a fair assumption that it will not complete within our
         * relatively short timeout.
         */
-       if (!i915_seqno_passed(intel_engine_get_seqno(engine), seqno - 1))
+       if (!intel_engine_has_started(engine, seqno))
                return false;
 
        /*
@@ -1222,7 +1221,7 @@ static bool __i915_spin_request(const struct i915_request *rq,
        irq = READ_ONCE(engine->breadcrumbs.irq_count);
        timeout_us += local_clock_us(&cpu);
        do {
-               if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
+               if (intel_engine_has_completed(engine, seqno))
                        return seqno == i915_request_global_seqno(rq);
 
                /*
index e1c9365dfefb1ef9ddf80183ecea07185b6d7ed4..9898301ab7ef5e3dc096e13cc0d24ddcef35cb41 100644
@@ -272,7 +272,10 @@ long i915_request_wait(struct i915_request *rq,
 #define I915_WAIT_ALL          BIT(2) /* used by i915_gem_object_wait() */
 #define I915_WAIT_FOR_IDLE_BOOST BIT(3)
 
-static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
+static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
+                                           u32 seqno);
+static inline bool intel_engine_has_completed(struct intel_engine_cs *engine,
+                                             u32 seqno);
 
 /**
  * Returns true if seq1 is later than seq2.
@@ -282,11 +285,31 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
        return (s32)(seq1 - seq2) >= 0;
 }
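
The signed subtraction makes the comparison wraparound-safe; for example:

/* seq1 = 1, seq2 = 0xFFFFFFFF (counter just wrapped):
 * (s32)(1 - 0xFFFFFFFF) == (s32)2 >= 0, so seqno 1 is correctly
 * treated as later even though it is numerically smaller. */
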
 
+/**
+ * i915_request_started - check if the request has begun being executed
+ * @rq: the request
+ *
+ * Returns true if the request has been submitted to hardware, and the hardware
+ * has advanced passed the end of the previous request and so should be either
+ * currently processing the request (though it may be preempted and so
+ * not necessarily the next request to complete) or have completed the request.
+ */
+static inline bool i915_request_started(const struct i915_request *rq)
+{
+       u32 seqno;
+
+       seqno = i915_request_global_seqno(rq);
+       if (!seqno) /* not yet submitted to HW */
+               return false;
+
+       return intel_engine_has_started(rq->engine, seqno);
+}
+
 static inline bool
 __i915_request_completed(const struct i915_request *rq, u32 seqno)
 {
        GEM_BUG_ON(!seqno);
-       return i915_seqno_passed(intel_engine_get_seqno(rq->engine), seqno) &&
+       return intel_engine_has_completed(rq->engine, seqno) &&
                seqno == i915_request_global_seqno(rq);
 }
 
@@ -301,18 +324,6 @@ static inline bool i915_request_completed(const struct i915_request *rq)
        return __i915_request_completed(rq, seqno);
 }
 
-static inline bool i915_request_started(const struct i915_request *rq)
-{
-       u32 seqno;
-
-       seqno = i915_request_global_seqno(rq);
-       if (!seqno)
-               return false;
-
-       return i915_seqno_passed(intel_engine_get_seqno(rq->engine),
-                                seqno - 1);
-}
-
 static inline bool i915_sched_node_signaled(const struct i915_sched_node *node)
 {
        const struct i915_request *rq =
index 98358b4b36dea7e13177bdf38554ffaad4f994e9..31efc971a3a83897b545c49142fc0e1366641521 100644
@@ -405,7 +405,7 @@ void i915_vma_unpin_iomap(struct i915_vma *vma)
        i915_vma_unpin(vma);
 }
 
-void i915_vma_unpin_and_release(struct i915_vma **p_vma)
+void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
 {
        struct i915_vma *vma;
        struct drm_i915_gem_object *obj;
@@ -420,6 +420,9 @@ void i915_vma_unpin_and_release(struct i915_vma **p_vma)
        i915_vma_unpin(vma);
        i915_vma_close(vma);
 
+       if (flags & I915_VMA_RELEASE_MAP)
+               i915_gem_object_unpin_map(obj);
+
        __i915_gem_object_release_unless_active(obj);
 }
 
index f06d663771070a66cb3df27f81d7f58298bb3708..4f7c1c7599f43c3590c99b3b8e122d87c4cc8030 100644
@@ -138,7 +138,8 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
                  struct i915_address_space *vm,
                  const struct i915_ggtt_view *view);
 
-void i915_vma_unpin_and_release(struct i915_vma **p_vma);
+void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags);
+#define I915_VMA_RELEASE_MAP BIT(0)
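
Hypothetical callers of the extended interface; per the i915_vma.c hunk above, the new flag asks the helper to also drop a mapping pin taken via i915_gem_object_pin_map():

/* Release a vma whose backing object was also pinned for CPU access: */
i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);

/* Old single-argument behaviour: */
i915_vma_unpin_and_release(&vma, 0);
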
 
 static inline bool i915_vma_is_active(struct i915_vma *vma)
 {
@@ -207,6 +208,11 @@ static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
        return lower_32_bits(vma->node.start);
 }
 
+static inline u32 i915_ggtt_pin_bias(struct i915_vma *vma)
+{
+       return i915_vm_to_ggtt(vma->vm)->pin_bias;
+}
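
A sketch of the intended use, assuming the existing PIN_GLOBAL/PIN_OFFSET_BIAS pin flags: a caller that must stay above the GGTT's reserved low range can now query the bias from the vma itself rather than hardcoding it:

err = i915_vma_pin(vma, 0, 0,
                   PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma));
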
+
 static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
 {
        i915_gem_object_get(vma->obj);
@@ -245,6 +251,8 @@ i915_vma_compare(struct i915_vma *vma,
        if (cmp)
                return cmp;
 
+       assert_i915_gem_gtt_types();
+
        /* ggtt_view.type also encodes its size so that we both distinguish
         * different views using it as a "type" and also use a compact (no
         * accessing of uninitialised padding bytes) memcmp without storing
index dcba645cabb87db8fbd7a0eb85e710e1b48691c5..fa7df5fe154bf06bdfc5e6eea12484e80db9de9d 100644
@@ -159,7 +159,7 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
        }
 
        intel_state->base.visible = false;
-       ret = intel_plane->check_plane(intel_plane, crtc_state, intel_state);
+       ret = intel_plane->check_plane(crtc_state, intel_state);
        if (ret)
                return ret;
 
@@ -170,7 +170,9 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
        if (state->fb && INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
            adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
                if (state->fb->modifier == I915_FORMAT_MOD_Y_TILED ||
-                   state->fb->modifier == I915_FORMAT_MOD_Yf_TILED) {
+                   state->fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
+                   state->fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+                   state->fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) {
                        DRM_DEBUG_KMS("Y/Yf tiling not supported in IF-ID mode\n");
                        return -EINVAL;
                }
index 1db6ba7d926ee3b27b0c00e9bf82f956173e8b59..84bf8d827136dc28515216df4bec41528024d9e5 100644
@@ -256,8 +256,7 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
        spin_unlock(&b->irq_lock);
 
        rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) {
-               GEM_BUG_ON(!i915_seqno_passed(intel_engine_get_seqno(engine),
-                                             wait->seqno));
+               GEM_BUG_ON(!intel_engine_signaled(engine, wait->seqno));
                RB_CLEAR_NODE(&wait->node);
                wake_up_process(wait->tsk);
        }
@@ -508,8 +507,7 @@ bool intel_engine_add_wait(struct intel_engine_cs *engine,
                return armed;
 
        /* Make the caller recheck if its request has already started. */
-       return i915_seqno_passed(intel_engine_get_seqno(engine),
-                                wait->seqno - 1);
+       return intel_engine_has_started(engine, wait->seqno);
 }
 
 static inline bool chain_wakeup(struct rb_node *rb, int priority)
index cf9b600cca79f22adbe01ecf05fede19c5560392..14cf4c367e368ae4549c03a439a84d55d9882bf7 100644
@@ -55,7 +55,9 @@ MODULE_FIRMWARE(I915_CSR_BXT);
 #define BXT_CSR_VERSION_REQUIRED       CSR_VERSION(1, 7)
 
 
-#define CSR_MAX_FW_SIZE                        0x2FFF
+#define BXT_CSR_MAX_FW_SIZE            0x3000
+#define GLK_CSR_MAX_FW_SIZE            0x4000
+#define ICL_CSR_MAX_FW_SIZE            0x6000
 #define CSR_DEFAULT_FW_OFFSET          0xFFFFFFFF
 
 struct intel_css_header {
@@ -279,6 +281,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
        struct intel_csr *csr = &dev_priv->csr;
        const struct stepping_info *si = intel_get_stepping_info(dev_priv);
        uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
+       uint32_t max_fw_size = 0;
        uint32_t i;
        uint32_t *dmc_payload;
        uint32_t required_version;
@@ -359,6 +362,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
                          si->stepping);
                return NULL;
        }
+       /* Convert dmc_offset into number of bytes. By default it is in dwords. */
+       dmc_offset *= 4;
        readcount += dmc_offset;
 
        /* Extract dmc_header information. */
@@ -391,8 +396,16 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 
        /* fw_size is in dwords, so multiplied by 4 to convert into bytes. */
        nbytes = dmc_header->fw_size * 4;
-       if (nbytes > CSR_MAX_FW_SIZE) {
-               DRM_ERROR("DMC firmware too big (%u bytes)\n", nbytes);
+       if (INTEL_GEN(dev_priv) >= 11)
+               max_fw_size = ICL_CSR_MAX_FW_SIZE;
+       else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+               max_fw_size = GLK_CSR_MAX_FW_SIZE;
+       else if (IS_GEN9(dev_priv))
+               max_fw_size = BXT_CSR_MAX_FW_SIZE;
+       else
+               MISSING_CASE(INTEL_REVID(dev_priv));
+       if (nbytes > max_fw_size) {
+               DRM_ERROR("DMC FW too big (%u bytes)\n", nbytes);
                return NULL;
        }
        csr->dmc_fw_size = dmc_header->fw_size;
@@ -468,12 +481,6 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
                csr->fw_path = I915_CSR_SKL;
        else if (IS_BROXTON(dev_priv))
                csr->fw_path = I915_CSR_BXT;
-       else {
-               DRM_ERROR("Unexpected: no known CSR firmware for platform\n");
-               return;
-       }
-
-       DRM_DEBUG_KMS("Loading %s\n", csr->fw_path);
 
        /*
         * Obtain a runtime pm reference, until CSR is loaded,
@@ -481,6 +488,14 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
         */
        intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
 
+       if (csr->fw_path == NULL) {
+               DRM_DEBUG_KMS("No known CSR firmware for platform, disabling runtime PM\n");
+               WARN_ON(!IS_ALPHA_SUPPORT(INTEL_INFO(dev_priv)));
+
+               return;
+       }
+
+       DRM_DEBUG_KMS("Loading %s\n", csr->fw_path);
        schedule_work(&dev_priv->csr.work);
 }
 
index 8761513f3532c5c4bfb56151833ab6217a5ab99f..cd01a09c5e0f529eac25f49863ab3f2d3bc9b017 100644
@@ -1414,7 +1414,7 @@ static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
                break;
        }
 
-       ref_clock = dev_priv->cdclk.hw.ref;
+       ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
 
        dco_freq = (cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) * ref_clock;
 
@@ -1427,6 +1427,81 @@ static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
        return dco_freq / (p0 * p1 * p2 * 5);
 }
 
+static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
+                                enum port port)
+{
+       u32 val = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
+
+       switch (val) {
+       case DDI_CLK_SEL_NONE:
+               return 0;
+       case DDI_CLK_SEL_TBT_162:
+               return 162000;
+       case DDI_CLK_SEL_TBT_270:
+               return 270000;
+       case DDI_CLK_SEL_TBT_540:
+               return 540000;
+       case DDI_CLK_SEL_TBT_810:
+               return 810000;
+       default:
+               MISSING_CASE(val);
+               return 0;
+       }
+}
+
+static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv,
+                               enum port port)
+{
+       u32 mg_pll_div0, mg_clktop_hsclkctl;
+       u32 m1, m2_int, m2_frac, div1, div2, refclk;
+       u64 tmp;
+
+       refclk = dev_priv->cdclk.hw.ref;
+
+       mg_pll_div0 = I915_READ(MG_PLL_DIV0(port));
+       mg_clktop_hsclkctl = I915_READ(MG_CLKTOP2_HSCLKCTL(port));
+
+       m1 = I915_READ(MG_PLL_DIV1(port)) & MG_PLL_DIV1_FBPREDIV_MASK;
+       m2_int = mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
+       m2_frac = (mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) ?
+                 (mg_pll_div0 & MG_PLL_DIV0_FBDIV_FRAC_MASK) >>
+                 MG_PLL_DIV0_FBDIV_FRAC_SHIFT : 0;
+
+       switch (mg_clktop_hsclkctl & MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
+       case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
+               div1 = 2;
+               break;
+       case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
+               div1 = 3;
+               break;
+       case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
+               div1 = 5;
+               break;
+       case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
+               div1 = 7;
+               break;
+       default:
+               MISSING_CASE(mg_clktop_hsclkctl);
+               return 0;
+       }
+
+       div2 = (mg_clktop_hsclkctl & MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
+               MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
+       /* A div2 value of 0 is the same as 1, i.e. no division */
+       if (div2 == 0)
+               div2 = 1;
+
+       /*
+        * Adjust the original formula to delay the division by 2^22 in order to
+        * minimize possible rounding errors.
+        */
+       tmp = (u64)m1 * m2_int * refclk +
+             (((u64)m1 * m2_frac * refclk) >> 22);
+       tmp = div_u64(tmp, 5 * div1 * div2);
+
+       return tmp;
+}
+
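
The shift-by-22 trick above keeps the fractional feedback divider exact: m2 is a Q22 fixed-point value (integer part m2_int, fraction m2_frac / 2^22), so the link rate is m1 * (m2_int + m2_frac / 2^22) * refclk / (5 * div1 * div2). A standalone sketch with made-up register values (not taken from hardware):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical divider values; refclk in kHz */
		uint64_t m1 = 2, m2_int = 27, m2_frac = 1 << 21; /* frac = 0.5 */
		uint64_t refclk = 38400, div1 = 5, div2 = 1;

		/* Scale the fractional term after the multiply so the low
		 * 22 bits aren't lost to an early shift.
		 */
		uint64_t tmp = m1 * m2_int * refclk +
			       ((m1 * m2_frac * refclk) >> 22);

		printf("link rate: %llu kHz\n",
		       (unsigned long long)(tmp / (5 * div1 * div2)));
		return 0;
	}
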
 static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
 {
        int dotclock;
@@ -1467,8 +1542,10 @@ static void icl_ddi_clock_get(struct intel_encoder *encoder,
                        link_clock = icl_calc_dp_combo_pll_link(dev_priv,
                                                                pll_id);
        } else {
-               /* FIXME - Add for MG PLL */
-               WARN(1, "MG PLL clock_get code not implemented yet\n");
+               if (pll_id == DPLL_ID_ICL_TBTPLL)
+                       link_clock = icl_calc_tbt_pll_link(dev_priv, port);
+               else
+                       link_clock = icl_calc_mg_pll_link(dev_priv, port);
        }
 
        pipe_config->port_clock = link_clock;
@@ -2468,7 +2545,128 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
        I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
 }
 
-static void icl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level,
+static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
+                                          int link_clock,
+                                          u32 level)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       enum port port = encoder->port;
+       const struct icl_mg_phy_ddi_buf_trans *ddi_translations;
+       u32 n_entries, val;
+       int ln;
+
+       n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
+       ddi_translations = icl_mg_phy_ddi_translations;
+       /* The table does not have values for level 3 and level 9. */
+       if (level >= n_entries || level == 3 || level == 9) {
+               DRM_DEBUG_KMS("DDI translation not found for level %d. Using %d instead.",
+                             level, n_entries - 2);
+               level = n_entries - 2;
+       }
+
+       /* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */
+       for (ln = 0; ln < 2; ln++) {
+               val = I915_READ(MG_TX1_LINK_PARAMS(port, ln));
+               val &= ~CRI_USE_FS32;
+               I915_WRITE(MG_TX1_LINK_PARAMS(port, ln), val);
+
+               val = I915_READ(MG_TX2_LINK_PARAMS(port, ln));
+               val &= ~CRI_USE_FS32;
+               I915_WRITE(MG_TX2_LINK_PARAMS(port, ln), val);
+       }
+
+       /* Program MG_TX_SWINGCTRL with values from vswing table */
+       for (ln = 0; ln < 2; ln++) {
+               val = I915_READ(MG_TX1_SWINGCTRL(port, ln));
+               val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
+               val |= CRI_TXDEEMPH_OVERRIDE_17_12(
+                       ddi_translations[level].cri_txdeemph_override_17_12);
+               I915_WRITE(MG_TX1_SWINGCTRL(port, ln), val);
+
+               val = I915_READ(MG_TX2_SWINGCTRL(port, ln));
+               val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
+               val |= CRI_TXDEEMPH_OVERRIDE_17_12(
+                       ddi_translations[level].cri_txdeemph_override_17_12);
+               I915_WRITE(MG_TX2_SWINGCTRL(port, ln), val);
+       }
+
+       /* Program MG_TX_DRVCTRL with values from vswing table */
+       for (ln = 0; ln < 2; ln++) {
+               val = I915_READ(MG_TX1_DRVCTRL(port, ln));
+               val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
+                        CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
+               val |= CRI_TXDEEMPH_OVERRIDE_5_0(
+                       ddi_translations[level].cri_txdeemph_override_5_0) |
+                       CRI_TXDEEMPH_OVERRIDE_11_6(
+                               ddi_translations[level].cri_txdeemph_override_11_6) |
+                       CRI_TXDEEMPH_OVERRIDE_EN;
+               I915_WRITE(MG_TX1_DRVCTRL(port, ln), val);
+
+               val = I915_READ(MG_TX2_DRVCTRL(port, ln));
+               val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
+                        CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
+               val |= CRI_TXDEEMPH_OVERRIDE_5_0(
+                       ddi_translations[level].cri_txdeemph_override_5_0) |
+                       CRI_TXDEEMPH_OVERRIDE_11_6(
+                               ddi_translations[level].cri_txdeemph_override_11_6) |
+                       CRI_TXDEEMPH_OVERRIDE_EN;
+               I915_WRITE(MG_TX2_DRVCTRL(port, ln), val);
+
+               /* FIXME: Program CRI_LOADGEN_SEL after the spec is updated */
+       }
+
+       /*
+        * Program MG_CLKHUB<LN, port being used> with the value from the
+        * frequency table. In legacy mode on the MG PHY, both TX1 and TX2
+        * are enabled, so use the table values for which TX1 and TX2 are
+        * enabled.
+        */
+       for (ln = 0; ln < 2; ln++) {
+               val = I915_READ(MG_CLKHUB(port, ln));
+               if (link_clock < 300000)
+                       val |= CFG_LOW_RATE_LKREN_EN;
+               else
+                       val &= ~CFG_LOW_RATE_LKREN_EN;
+               I915_WRITE(MG_CLKHUB(port, ln), val);
+       }
+
+       /* Program the MG_TX_DCC<LN, port being used> based on the link frequency */
+       for (ln = 0; ln < 2; ln++) {
+               val = I915_READ(MG_TX1_DCC(port, ln));
+               val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
+               if (link_clock <= 500000) {
+                       val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
+               } else {
+                       val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
+                               CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
+               }
+               I915_WRITE(MG_TX1_DCC(port, ln), val);
+
+               val = I915_READ(MG_TX2_DCC(port, ln));
+               val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
+               if (link_clock <= 500000) {
+                       val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
+               } else {
+                       val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
+                               CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
+               }
+               I915_WRITE(MG_TX2_DCC(port, ln), val);
+       }
+
+       /* Program MG_TX_PISO_READLOAD with values from vswing table */
+       for (ln = 0; ln < 2; ln++) {
+               val = I915_READ(MG_TX1_PISO_READLOAD(port, ln));
+               val |= CRI_CALCINIT;
+               I915_WRITE(MG_TX1_PISO_READLOAD(port, ln), val);
+
+               val = I915_READ(MG_TX2_PISO_READLOAD(port, ln));
+               val |= CRI_CALCINIT;
+               I915_WRITE(MG_TX2_PISO_READLOAD(port, ln), val);
+       }
+}
+
+static void icl_ddi_vswing_sequence(struct intel_encoder *encoder,
+                                   int link_clock,
+                                   u32 level,
                                    enum intel_output_type type)
 {
        enum port port = encoder->port;
@@ -2476,8 +2674,7 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level,
        if (port == PORT_A || port == PORT_B)
                icl_combo_phy_ddi_vswing_sequence(encoder, level, type);
        else
-               /* Not Implemented Yet */
-               WARN_ON(1);
+               icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level);
 }
 
 static uint32_t translate_signal_level(int signal_levels)
@@ -2512,7 +2709,8 @@ u32 bxt_signal_levels(struct intel_dp *intel_dp)
        int level = intel_ddi_dp_level(intel_dp);
 
        if (IS_ICELAKE(dev_priv))
-               icl_ddi_vswing_sequence(encoder, level, encoder->type);
+               icl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
+                                       level, encoder->type);
        else if (IS_CANNONLAKE(dev_priv))
                cnl_ddi_vswing_sequence(encoder, level, encoder->type);
        else
@@ -2692,8 +2890,12 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
 
        intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
 
+       icl_program_mg_dp_mode(intel_dp);
+       icl_disable_phy_clock_gating(dig_port);
+
        if (IS_ICELAKE(dev_priv))
-               icl_ddi_vswing_sequence(encoder, level, encoder->type);
+               icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
+                                       level, encoder->type);
        else if (IS_CANNONLAKE(dev_priv))
                cnl_ddi_vswing_sequence(encoder, level, encoder->type);
        else if (IS_GEN9_LP(dev_priv))
@@ -2708,7 +2910,10 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
        if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
                intel_dp_stop_link_train(intel_dp);
 
-       intel_ddi_enable_pipe_clock(crtc_state);
+       icl_enable_phy_clock_gating(dig_port);
+
+       if (!is_mst)
+               intel_ddi_enable_pipe_clock(crtc_state);
 }
 
 static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
@@ -2728,7 +2933,8 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
        intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
 
        if (IS_ICELAKE(dev_priv))
-               icl_ddi_vswing_sequence(encoder, level, INTEL_OUTPUT_HDMI);
+               icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
+                                       level, INTEL_OUTPUT_HDMI);
        else if (IS_CANNONLAKE(dev_priv))
                cnl_ddi_vswing_sequence(encoder, level, INTEL_OUTPUT_HDMI);
        else if (IS_GEN9_LP(dev_priv))
@@ -2810,14 +3016,14 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
        bool is_mst = intel_crtc_has_type(old_crtc_state,
                                          INTEL_OUTPUT_DP_MST);
 
-       intel_ddi_disable_pipe_clock(old_crtc_state);
-
-       /*
-        * Power down sink before disabling the port, otherwise we end
-        * up getting interrupts from the sink on detecting link loss.
-        */
-       if (!is_mst)
+       if (!is_mst) {
+               intel_ddi_disable_pipe_clock(old_crtc_state);
+               /*
+                * Power down the sink before disabling the port, otherwise
+                * we end up getting interrupts from the sink when it detects
+                * link loss.
+                */
                intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+       }
 
        intel_disable_ddi_buf(encoder);
 
index 633f9fbf72eab7787102d094f8f442799da4c401..6eecd64734d51808af700da8f7a41fa399fc0df8 100644
@@ -103,9 +103,9 @@ enum intel_platform {
        func(has_psr); \
        func(has_rc6); \
        func(has_rc6p); \
-       func(has_resource_streamer); \
        func(has_runtime_pm); \
        func(has_snoop); \
+       func(has_coherent_ggtt); \
        func(unfenced_needs_alignment); \
        func(cursor_needs_physical); \
        func(hws_needs_physical); \
index 775968fa204990ea97993a42a9f3a70e143bd02a..1bd14c61dab5fd8e5d51689241273dd03d0c0d3e 100644
@@ -2474,6 +2474,12 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
        }
 }
 
+bool is_ccs_modifier(u64 modifier)
+{
+       return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+              modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+}
+
 static int
 intel_fill_fb_info(struct drm_i915_private *dev_priv,
                   struct drm_framebuffer *fb)
@@ -2504,8 +2510,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
                        return ret;
                }
 
-               if ((fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
-                    fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) && i == 1) {
+               if (is_ccs_modifier(fb->modifier) && i == 1) {
                        int hsub = fb->format->hsub;
                        int vsub = fb->format->vsub;
                        int tile_width, tile_height;
@@ -3055,8 +3060,7 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
         * The CCS AUX surface doesn't have its own x/y offsets, so we must
         * make sure they match the main surface x/y offsets.
         */
-       if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
-           fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) {
+       if (is_ccs_modifier(fb->modifier)) {
                while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
                        if (offset == 0)
                                break;
@@ -3190,8 +3194,7 @@ int skl_check_plane_surface(const struct intel_crtc_state *crtc_state,
                ret = skl_check_nv12_aux_surface(plane_state);
                if (ret)
                        return ret;
-       } else if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
-                  fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) {
+       } else if (is_ccs_modifier(fb->modifier)) {
                ret = skl_check_ccs_aux_surface(plane_state);
                if (ret)
                        return ret;
@@ -3552,11 +3555,11 @@ static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
        case I915_FORMAT_MOD_Y_TILED:
                return PLANE_CTL_TILED_Y;
        case I915_FORMAT_MOD_Y_TILED_CCS:
-               return PLANE_CTL_TILED_Y | PLANE_CTL_DECOMPRESSION_ENABLE;
+               return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
        case I915_FORMAT_MOD_Yf_TILED:
                return PLANE_CTL_TILED_YF;
        case I915_FORMAT_MOD_Yf_TILED_CCS:
-               return PLANE_CTL_TILED_YF | PLANE_CTL_DECOMPRESSION_ENABLE;
+               return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
        default:
                MISSING_CASE(fb_modifier);
        }
@@ -5079,10 +5082,14 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
                mutex_lock(&dev_priv->pcu_lock);
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
                mutex_unlock(&dev_priv->pcu_lock);
-               /* wait for pcode to finish disabling IPS, which may take up to 42ms */
+               /*
+                * Wait for PCODE to finish disabling IPS. The BSpec-specified
+                * 42ms timeout occasionally expires, so use a 100ms timeout
+                * instead.
+                */
                if (intel_wait_for_register(dev_priv,
                                            IPS_CTL, IPS_ENABLE, 0,
-                                           42))
+                                           100))
                        DRM_ERROR("Timed out waiting for IPS disable\n");
        } else {
                I915_WRITE(IPS_CTL, 0);
@@ -8799,13 +8806,13 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
                fb->modifier = I915_FORMAT_MOD_X_TILED;
                break;
        case PLANE_CTL_TILED_Y:
-               if (val & PLANE_CTL_DECOMPRESSION_ENABLE)
+               if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
                        fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
                else
                        fb->modifier = I915_FORMAT_MOD_Y_TILED;
                break;
        case PLANE_CTL_TILED_YF:
-               if (val & PLANE_CTL_DECOMPRESSION_ENABLE)
+               if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
                        fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
                else
                        fb->modifier = I915_FORMAT_MOD_Yf_TILED;
@@ -8974,7 +8981,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
                I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
                     pipe_name(crtc->pipe));
 
-       I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL_DRIVER(HSW_DISP_PW_GLOBAL)),
+       I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
                        "Display power well on\n");
        I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
        I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
@@ -9691,8 +9698,7 @@ static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
        return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
 }
 
-static int i845_check_cursor(struct intel_plane *plane,
-                            struct intel_crtc_state *crtc_state,
+static int i845_check_cursor(struct intel_crtc_state *crtc_state,
                             struct intel_plane_state *plane_state)
 {
        const struct drm_framebuffer *fb = plane_state->base.fb;
@@ -9882,10 +9888,10 @@ static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
        return true;
 }
 
-static int i9xx_check_cursor(struct intel_plane *plane,
-                            struct intel_crtc_state *crtc_state,
+static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
                             struct intel_plane_state *plane_state)
 {
+       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        const struct drm_framebuffer *fb = plane_state->base.fb;
        enum pipe pipe = plane->pipe;
@@ -12739,7 +12745,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
         * down.
         */
        INIT_WORK(&state->commit_work, intel_atomic_cleanup_work);
-       schedule_work(&state->commit_work);
+       queue_work(system_highpri_wq, &state->commit_work);
 }
 
 static void intel_atomic_commit_work(struct work_struct *work)
@@ -12969,8 +12975,11 @@ static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
            INTEL_INFO(dev_priv)->cursor_needs_physical) {
                struct drm_i915_gem_object *obj = intel_fb_obj(fb);
                const int align = intel_cursor_alignment(dev_priv);
+               int err;
 
-               return i915_gem_object_attach_phys(obj, align);
+               err = i915_gem_object_attach_phys(obj, align);
+               if (err)
+                       return err;
        }
 
        vma = intel_pin_and_fence_fb_obj(fb,
@@ -13189,10 +13198,10 @@ skl_max_scale(struct intel_crtc *intel_crtc,
 }
 
 static int
-intel_check_primary_plane(struct intel_plane *plane,
-                         struct intel_crtc_state *crtc_state,
+intel_check_primary_plane(struct intel_crtc_state *crtc_state,
                          struct intel_plane_state *state)
 {
+       struct intel_plane *plane = to_intel_plane(state->base.plane);
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        struct drm_crtc *crtc = state->base.crtc;
        int min_scale = DRM_PLANE_HELPER_NO_SCALING;
@@ -13400,8 +13409,7 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
        case DRM_FORMAT_XBGR8888:
        case DRM_FORMAT_ARGB8888:
        case DRM_FORMAT_ABGR8888:
-               if (modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
-                   modifier == I915_FORMAT_MOD_Y_TILED_CCS)
+               if (is_ccs_modifier(modifier))
                        return true;
                /* fall through */
        case DRM_FORMAT_RGB565:
@@ -13620,24 +13628,22 @@ static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
 bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
                          enum pipe pipe, enum plane_id plane_id)
 {
-       if (plane_id == PLANE_PRIMARY) {
-               if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
-                       return false;
-               else if ((INTEL_GEN(dev_priv) == 9 && pipe == PIPE_C) &&
-                        !IS_GEMINILAKE(dev_priv))
-                       return false;
-       } else if (plane_id >= PLANE_SPRITE0) {
-               if (plane_id == PLANE_CURSOR)
-                       return false;
-               if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) == 10) {
-                       if (plane_id != PLANE_SPRITE0)
-                               return false;
-               } else {
-                       if (plane_id != PLANE_SPRITE0 || pipe == PIPE_C ||
-                           IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
-                               return false;
-               }
-       }
+       /*
+        * FIXME: ICL requires two hardware planes for scanning out NV12
+        * framebuffers. Do not advertize support until this is implemented.
+        */
+       if (INTEL_GEN(dev_priv) >= 11)
+               return false;
+
+       if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
+               return false;
+
+       if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
+               return false;
+
+       if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
+               return false;
+
        return true;
 }
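
Condensing the rewritten check, one reading (not spelled out in the patch) is that NV12 stays advertised only on the primary and first sprite planes of GLK and Gen10 pipes, and of non-SKL/BXT Gen9 pipes A/B. A standalone restatement with stubbed platform flags:

	#include <stdbool.h>
	#include <stdio.h>

	enum pipe { PIPE_A, PIPE_B, PIPE_C };
	enum plane_id { PLANE_PRIMARY, PLANE_SPRITE0, PLANE_SPRITE1 };

	/* Mirror of the simplified predicate; gen and the is_* flags stand
	 * in for the kernel's platform helpers.
	 */
	static bool has_planar(int gen, bool is_skl, bool is_bxt, bool is_glk,
			       enum pipe pipe, enum plane_id plane_id)
	{
		if (gen >= 11)	/* ICL needs two planes per NV12 fb, off for now */
			return false;
		if (is_skl || is_bxt)
			return false;
		if (gen == 9 && !is_glk && pipe == PIPE_C)
			return false;
		return plane_id == PLANE_PRIMARY || plane_id == PLANE_SPRITE0;
	}

	int main(void)
	{
		/* e.g. KBL (Gen9, not GLK): pipes A/B keep NV12, pipe C loses it */
		printf("pipe A: %d, pipe C: %d\n",
		       has_planar(9, false, false, false, PIPE_A, PLANE_PRIMARY),
		       has_planar(9, false, false, false, PIPE_C, PLANE_PRIMARY));
		return 0;
	}
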
 
@@ -14131,6 +14137,9 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
 
        intel_pps_init(dev_priv);
 
+       if (INTEL_INFO(dev_priv)->num_pipes == 0)
+               return;
+
        /*
         * intel_edp_init_connector() depends on this completing first, to
         * prevent the registration of both eDP and LVDS and the incorrect
@@ -14547,7 +14556,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
                break;
        case DRM_FORMAT_NV12:
                if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) ||
-                   IS_BROXTON(dev_priv)) {
+                   IS_BROXTON(dev_priv) || INTEL_GEN(dev_priv) >= 11) {
                        DRM_DEBUG_KMS("unsupported pixel format: %s\n",
                                      drm_get_format_name(mode_cmd->pixel_format,
                                                          &format_name));
@@ -14594,8 +14603,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
                 * potential runtime errors at plane configuration time.
                 */
                if (IS_GEN9(dev_priv) && i == 0 && fb->width > 3840 &&
-                   (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
-                    fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS))
+                   is_ccs_modifier(fb->modifier))
                        stride_alignment *= 4;
 
                if (fb->pitches[i] & (stride_alignment - 1)) {
@@ -15131,12 +15139,61 @@ static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
        DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
 }
 
+static int intel_initial_commit(struct drm_device *dev)
+{
+       struct drm_atomic_state *state = NULL;
+       struct drm_modeset_acquire_ctx ctx;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *crtc_state;
+       int ret = 0;
+
+       state = drm_atomic_state_alloc(dev);
+       if (!state)
+               return -ENOMEM;
+
+       drm_modeset_acquire_init(&ctx, 0);
+
+retry:
+       state->acquire_ctx = &ctx;
+
+       drm_for_each_crtc(crtc, dev) {
+               crtc_state = drm_atomic_get_crtc_state(state, crtc);
+               if (IS_ERR(crtc_state)) {
+                       ret = PTR_ERR(crtc_state);
+                       goto out;
+               }
+
+               if (crtc_state->active) {
+                       ret = drm_atomic_add_affected_planes(state, crtc);
+                       if (ret)
+                               goto out;
+               }
+       }
+
+       ret = drm_atomic_commit(state);
+
+out:
+       if (ret == -EDEADLK) {
+               drm_atomic_state_clear(state);
+               drm_modeset_backoff(&ctx);
+               goto retry;
+       }
+
+       drm_atomic_state_put(state);
+
+       drm_modeset_drop_locks(&ctx);
+       drm_modeset_acquire_fini(&ctx);
+
+       return ret;
+}
+
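
The retry loop above is the standard atomic locking dance: drm_atomic_commit() can return -EDEADLK when lock acquisition through the context detects a deadlock, in which case the half-built state is cleared and drm_modeset_backoff() drops the contended locks before retrying. A stubbed, runnable sketch of just that control flow (the functions here are fakes; only the pattern is the point):

	#include <errno.h>
	#include <stdio.h>

	static int attempts;

	/* Fake commit: fail with -EDEADLK once, then succeed. */
	static int fake_commit(void)
	{
		return ++attempts == 1 ? -EDEADLK : 0;
	}

	static void fake_state_clear(void)     { printf("state cleared\n"); }
	static void fake_modeset_backoff(void) { printf("locks dropped, backing off\n"); }

	int main(void)
	{
		int ret;

	retry:
		ret = fake_commit();
		if (ret == -EDEADLK) {
			fake_state_clear();	/* drop the stale state */
			fake_modeset_backoff();	/* release contended locks */
			goto retry;
		}
		printf("commit done after %d attempt(s)\n", attempts);
		return 0;
	}
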
 int intel_modeset_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        enum pipe pipe;
        struct intel_crtc *crtc;
+       int ret;
 
        dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
 
@@ -15160,9 +15217,6 @@ int intel_modeset_init(struct drm_device *dev)
 
        intel_init_pm(dev_priv);
 
-       if (INTEL_INFO(dev_priv)->num_pipes == 0)
-               return 0;
-
        /*
         * There may be no VBT; and if the BIOS enabled SSC we can
         * just keep using it to avoid unnecessary flicker.  Whereas if the
@@ -15211,8 +15265,6 @@ int intel_modeset_init(struct drm_device *dev)
                      INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
 
        for_each_pipe(dev_priv, pipe) {
-               int ret;
-
                ret = intel_crtc_init(dev_priv, pipe);
                if (ret) {
                        drm_mode_config_cleanup(dev);
@@ -15268,6 +15320,16 @@ int intel_modeset_init(struct drm_device *dev)
        if (!HAS_GMCH_DISPLAY(dev_priv))
                sanitize_watermarks(dev);
 
+       /*
+        * Force all active planes to recompute their states, so that on
+        * mode_setcrtc after probe all the intel_plane_state variables
+        * are already calculated and there are no assert_plane warnings
+        * during bootup.
+        */
+       ret = intel_initial_commit(dev);
+       if (ret)
+               DRM_DEBUG_KMS("Initial commit in probe failed.\n");
+
        return 0;
 }
 
@@ -15792,6 +15854,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
        struct intel_encoder *encoder;
        int i;
 
+       intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+
        intel_early_display_was(dev_priv);
        intel_modeset_readout_hw_state(dev);
 
@@ -15846,9 +15910,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
                if (WARN_ON(put_domains))
                        modeset_put_power_domains(dev_priv, put_domains);
        }
-       intel_display_set_init_power(dev_priv, false);
 
-       intel_power_domains_verify_state(dev_priv);
+       intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
 
        intel_fbc_init_pipe_state(dev_priv);
 }
@@ -15937,8 +16000,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
        flush_work(&dev_priv->atomic_helper.free_work);
        WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
 
-       intel_disable_gt_powersave(dev_priv);
-
        /*
         * Interrupts and polling as the first thing to avoid creating havoc.
         * Too much stuff here (turning of connectors, ...) would
@@ -15966,8 +16027,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
 
        intel_cleanup_overlay(dev_priv);
 
-       intel_cleanup_gt_powersave(dev_priv);
-
        intel_teardown_gmbus(dev_priv);
 
        destroy_workqueue(dev_priv->modeset_wq);
@@ -16075,8 +16134,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
                return NULL;
 
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-               error->power_well_driver =
-                       I915_READ(HSW_PWR_WELL_CTL_DRIVER(HSW_DISP_PW_GLOBAL));
+               error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
 
        for_each_pipe(dev_priv, i) {
                error->pipe[i].power_domain_on =
index 138a1bc1818c1d434181f983db657a27b01c92f5..e20e6a36a74865410f0b2ba89aa358e41eb5a284 100644
 #ifndef _INTEL_DISPLAY_H_
 #define _INTEL_DISPLAY_H_
 
+enum i915_gpio {
+       GPIOA,
+       GPIOB,
+       GPIOC,
+       GPIOD,
+       GPIOE,
+       GPIOF,
+       GPIOG,
+       GPIOH,
+       __GPIOI_UNUSED,
+       GPIOJ,
+       GPIOK,
+       GPIOL,
+       GPIOM,
+};
+
 enum pipe {
        INVALID_PIPE = -1,
 
@@ -161,6 +177,13 @@ enum tc_port {
        I915_MAX_TC_PORTS
 };
 
+enum tc_port_type {
+       TC_PORT_UNKNOWN = 0,
+       TC_PORT_TYPEC,
+       TC_PORT_TBT,
+       TC_PORT_LEGACY,
+};
+
 enum dpio_channel {
        DPIO_CH0,
        DPIO_CH1
@@ -346,11 +369,11 @@ struct intel_link_m_n {
 
 #define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask)    \
        for_each_power_well(__dev_priv, __power_well)                           \
-               for_each_if((__power_well)->domains & (__domain_mask))
+               for_each_if((__power_well)->desc->domains & (__domain_mask))
 
 #define for_each_power_domain_well_rev(__dev_priv, __power_well, __domain_mask) \
        for_each_power_well_rev(__dev_priv, __power_well)                       \
-               for_each_if((__power_well)->domains & (__domain_mask))
+               for_each_if((__power_well)->desc->domains & (__domain_mask))
 
 #define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \
        for ((__i) = 0; \
@@ -382,4 +405,5 @@ void intel_link_compute_m_n(int bpp, int nlanes,
                            struct intel_link_m_n *m_n,
                            bool reduce_m_n);
 
+bool is_ccs_modifier(u64 modifier);
 #endif
index cd0f649b57a5b75dff70265637a3a4b0ead4373b..436c22de33b6e7ff7dabb50abeb230cb187ce409 100644
@@ -107,13 +107,6 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp)
        return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
 }
 
-static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
-{
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-
-       return intel_dig_port->base.base.dev;
-}
-
 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
 {
        return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
@@ -176,14 +169,45 @@ static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
        return intel_dp->common_rates[intel_dp->num_common_rates - 1];
 }
 
+static int intel_dp_get_fia_supported_lane_count(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+       enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
+       u32 lane_info;
+
+       if (tc_port == PORT_TC_NONE || dig_port->tc_type != TC_PORT_TYPEC)
+               return 4;
+
+       lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
+                    DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
+                   DP_LANE_ASSIGNMENT_SHIFT(tc_port);
+
+       switch (lane_info) {
+       default:
+               MISSING_CASE(lane_info);
+       case 1:
+       case 2:
+       case 4:
+       case 8:
+               return 1;
+       case 3:
+       case 12:
+               return 2;
+       case 15:
+               return 4;
+       }
+}
+
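The switch above decodes the FIA's per-port lane-assignment bitmask: each bit marks one PHY lane granted to DP, only contiguous grants (one lane, an adjacent pair, or all four) are expected, and anything else conservatively falls back to a single lane. A hypothetical standalone mirror of the mapping:

	#include <stdio.h>

	static int dp_lanes_from_assignment(unsigned int lane_info)
	{
		switch (lane_info) {
		case 0x1: case 0x2: case 0x4: case 0x8:
			return 1;	/* a single lane */
		case 0x3: case 0xc:
			return 2;	/* an adjacent pair */
		case 0xf:
			return 4;	/* the full x4 grant */
		default:
			return 1;	/* unexpected pattern: stay conservative */
		}
	}

	int main(void)
	{
		unsigned int masks[] = { 0x1, 0x3, 0xc, 0xf, 0x5 };
		unsigned int i;

		for (i = 0; i < sizeof(masks) / sizeof(masks[0]); i++)
			printf("mask 0x%x -> x%d\n", masks[i],
			       dp_lanes_from_assignment(masks[i]));
		return 0;
	}
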
 /* Theoretical max between source and sink, capped by the FIA lane assignment */
 static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        int source_max = intel_dig_port->max_lanes;
        int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
+       int fia_max = intel_dp_get_fia_supported_lane_count(intel_dp);
 
-       return min(source_max, sink_max);
+       return min3(source_max, sink_max, fia_max);
 }
 
 int intel_dp_max_lane_count(struct intel_dp *intel_dp)
@@ -198,6 +222,138 @@ intel_dp_link_required(int pixel_clock, int bpp)
        return DIV_ROUND_UP(pixel_clock * bpp, 8);
 }
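
For scale: intel_dp_link_required() returns the payload in kB/s (pixel_clock is in kHz), and with 8b/10b coding each lane delivers one byte per link clock, so capacity is simply link_clock * lanes. A worked check with illustrative numbers (not from the patch):

	#include <stdio.h>

	static int link_required(int pixel_clock, int bpp)
	{
		return (pixel_clock * bpp + 7) / 8;	/* DIV_ROUND_UP */
	}

	int main(void)
	{
		int required  = link_required(533250, 24);	/* ~4K60, 24bpp */
		int available = 540000 * 4;			/* HBR2 x4 */

		printf("required %d kB/s, available %d kB/s -> %s\n",
		       required, available,
		       required <= available ? "fits" : "does not fit");
		return 0;
	}
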
 
+void icl_program_mg_dp_mode(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       enum port port = intel_dig_port->base.port;
+       enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+       u32 ln0, ln1, lane_info;
+
+       if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT)
+               return;
+
+       ln0 = I915_READ(MG_DP_MODE(port, 0));
+       ln1 = I915_READ(MG_DP_MODE(port, 1));
+
+       switch (intel_dig_port->tc_type) {
+       case TC_PORT_TYPEC:
+               ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
+               ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
+
+               lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
+                            DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
+                           DP_LANE_ASSIGNMENT_SHIFT(tc_port);
+
+               switch (lane_info) {
+               case 0x1:
+               case 0x4:
+                       break;
+               case 0x2:
+                       ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
+                       break;
+               case 0x3:
+                       ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
+                              MG_DP_MODE_CFG_DP_X2_MODE;
+                       break;
+               case 0x8:
+                       ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
+                       break;
+               case 0xC:
+                       ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
+                              MG_DP_MODE_CFG_DP_X2_MODE;
+                       break;
+               case 0xF:
+                       ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
+                              MG_DP_MODE_CFG_DP_X2_MODE;
+                       ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
+                              MG_DP_MODE_CFG_DP_X2_MODE;
+                       break;
+               default:
+                       MISSING_CASE(lane_info);
+               }
+               break;
+
+       case TC_PORT_LEGACY:
+               ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
+               ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
+               break;
+
+       default:
+               MISSING_CASE(intel_dig_port->tc_type);
+               return;
+       }
+
+       I915_WRITE(MG_DP_MODE(port, 0), ln0);
+       I915_WRITE(MG_DP_MODE(port, 1), ln1);
+}
+
+void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port)
+{
+       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+       enum port port = dig_port->base.port;
+       enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+       i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
+       u32 val;
+       int i;
+
+       if (tc_port == PORT_TC_NONE)
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
+               val = I915_READ(mg_regs[i]);
+               val |= MG_DP_MODE_CFG_TR2PWR_GATING |
+                      MG_DP_MODE_CFG_TRPWR_GATING |
+                      MG_DP_MODE_CFG_CLNPWR_GATING |
+                      MG_DP_MODE_CFG_DIGPWR_GATING |
+                      MG_DP_MODE_CFG_GAONPWR_GATING;
+               I915_WRITE(mg_regs[i], val);
+       }
+
+       val = I915_READ(MG_MISC_SUS0(tc_port));
+       val |= MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3) |
+              MG_MISC_SUS0_CFG_TR2PWR_GATING |
+              MG_MISC_SUS0_CFG_CL2PWR_GATING |
+              MG_MISC_SUS0_CFG_GAONPWR_GATING |
+              MG_MISC_SUS0_CFG_TRPWR_GATING |
+              MG_MISC_SUS0_CFG_CL1PWR_GATING |
+              MG_MISC_SUS0_CFG_DGPWR_GATING;
+       I915_WRITE(MG_MISC_SUS0(tc_port), val);
+}
+
+void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port)
+{
+       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+       enum port port = dig_port->base.port;
+       enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+       i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
+       u32 val;
+       int i;
+
+       if (tc_port == PORT_TC_NONE)
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
+               val = I915_READ(mg_regs[i]);
+               val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING |
+                        MG_DP_MODE_CFG_TRPWR_GATING |
+                        MG_DP_MODE_CFG_CLNPWR_GATING |
+                        MG_DP_MODE_CFG_DIGPWR_GATING |
+                        MG_DP_MODE_CFG_GAONPWR_GATING);
+               I915_WRITE(mg_regs[i], val);
+       }
+
+       val = I915_READ(MG_MISC_SUS0(tc_port));
+       val &= ~(MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK |
+                MG_MISC_SUS0_CFG_TR2PWR_GATING |
+                MG_MISC_SUS0_CFG_CL2PWR_GATING |
+                MG_MISC_SUS0_CFG_GAONPWR_GATING |
+                MG_MISC_SUS0_CFG_TRPWR_GATING |
+                MG_MISC_SUS0_CFG_CL1PWR_GATING |
+                MG_MISC_SUS0_CFG_DGPWR_GATING);
+       I915_WRITE(MG_MISC_SUS0(tc_port), val);
+}
+
 int
 intel_dp_max_data_rate(int max_link_clock, int max_lanes)
 {
@@ -498,7 +654,7 @@ intel_dp_pps_init(struct intel_dp *intel_dp);
 
 static void pps_lock(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
        /*
         * See intel_power_sequencer_reset() for why we need
@@ -511,7 +667,7 @@ static void pps_lock(struct intel_dp *intel_dp)
 
 static void pps_unlock(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
        mutex_unlock(&dev_priv->pps_mutex);
 
@@ -521,7 +677,7 @@ static void pps_unlock(struct intel_dp *intel_dp)
 static void
 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum pipe pipe = intel_dp->pps_pipe;
        bool pll_enabled, release_cl_override = false;
@@ -626,7 +782,7 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
 static enum pipe
 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum pipe pipe;
 
@@ -673,7 +829,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
 static int
 bxt_power_sequencer_idx(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        int backlight_controller = dev_priv->vbt.backlight.controller;
 
        lockdep_assert_held(&dev_priv->pps_mutex);
@@ -742,7 +898,7 @@ vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
 static void
 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum port port = intel_dig_port->base.port;
 
@@ -819,7 +975,7 @@ struct pps_registers {
 static void intel_pps_get_registers(struct intel_dp *intel_dp,
                                    struct pps_registers *regs)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        int pps_idx = 0;
 
        memset(regs, 0, sizeof(*regs));
@@ -865,7 +1021,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
 {
        struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
                                                 edp_notifier);
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
        if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
                return 0;
@@ -895,7 +1051,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
 
 static bool edp_have_panel_power(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
        lockdep_assert_held(&dev_priv->pps_mutex);
 
@@ -908,7 +1064,7 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp)
 
 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
        lockdep_assert_held(&dev_priv->pps_mutex);
 
@@ -922,7 +1078,7 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
 static void
 intel_dp_check_edp(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
        if (!intel_dp_is_edp(intel_dp))
                return;
@@ -938,7 +1094,7 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
 static uint32_t
 intel_dp_aux_wait_done(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
        uint32_t status;
        bool done;
@@ -955,7 +1111,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
 
 static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
        if (index)
                return 0;
@@ -969,7 +1125,7 @@ static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 
 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
        if (index)
                return 0;
@@ -987,7 +1143,7 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 
 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
        if (intel_dp->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
                /* Workaround for non-ULT HSW */
@@ -1045,15 +1201,23 @@ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
                                      int send_bytes,
                                      uint32_t unused)
 {
-       return DP_AUX_CH_CTL_SEND_BUSY |
-              DP_AUX_CH_CTL_DONE |
-              DP_AUX_CH_CTL_INTERRUPT |
-              DP_AUX_CH_CTL_TIME_OUT_ERROR |
-              DP_AUX_CH_CTL_TIME_OUT_MAX |
-              DP_AUX_CH_CTL_RECEIVE_ERROR |
-              (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
-              DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
-              DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       uint32_t ret;
+
+       ret = DP_AUX_CH_CTL_SEND_BUSY |
+             DP_AUX_CH_CTL_DONE |
+             DP_AUX_CH_CTL_INTERRUPT |
+             DP_AUX_CH_CTL_TIME_OUT_ERROR |
+             DP_AUX_CH_CTL_TIME_OUT_MAX |
+             DP_AUX_CH_CTL_RECEIVE_ERROR |
+             (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+             DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
+             DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
+
+       if (intel_dig_port->tc_type == TC_PORT_TBT)
+               ret |= DP_AUX_CH_CTL_TBT_IO;
+
+       return ret;
 }
 
 static int
@@ -1381,7 +1545,7 @@ intel_aux_power_domain(struct intel_dp *intel_dp)
 
 static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        enum aux_ch aux_ch = intel_dp->aux_ch;
 
        switch (aux_ch) {
@@ -1397,7 +1561,7 @@ static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
 
 static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        enum aux_ch aux_ch = intel_dp->aux_ch;
 
        switch (aux_ch) {
@@ -1413,7 +1577,7 @@ static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
 
 static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        enum aux_ch aux_ch = intel_dp->aux_ch;
 
        switch (aux_ch) {
@@ -1431,7 +1595,7 @@ static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
 
 static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        enum aux_ch aux_ch = intel_dp->aux_ch;
 
        switch (aux_ch) {
@@ -1449,7 +1613,7 @@ static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
 
 static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        enum aux_ch aux_ch = intel_dp->aux_ch;
 
        switch (aux_ch) {
@@ -1468,7 +1632,7 @@ static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
 
 static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        enum aux_ch aux_ch = intel_dp->aux_ch;
 
        switch (aux_ch) {
@@ -1494,7 +1658,7 @@ intel_dp_aux_fini(struct intel_dp *intel_dp)
 static void
 intel_dp_aux_init(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
 
        intel_dp->aux_ch = intel_aux_ch(intel_dp);
@@ -1662,7 +1826,7 @@ struct link_config_limits {
 static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
                                struct intel_crtc_state *pipe_config)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_connector *intel_connector = intel_dp->attached_connector;
        int bpp, bpc;
 
@@ -2030,7 +2194,7 @@ static void wait_panel_status(struct intel_dp *intel_dp,
                                       u32 mask,
                                       u32 value)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        i915_reg_t pp_stat_reg, pp_ctrl_reg;
 
        lockdep_assert_held(&dev_priv->pps_mutex);
@@ -2106,7 +2270,7 @@ static void edp_wait_backlight_off(struct intel_dp *intel_dp)
 
 static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        u32 control;
 
        lockdep_assert_held(&dev_priv->pps_mutex);
@@ -2127,7 +2291,7 @@ static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
  */
 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        u32 pp;
        i915_reg_t pp_stat_reg, pp_ctrl_reg;
@@ -2198,7 +2362,7 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
 
 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *intel_dig_port =
                dp_to_dig_port(intel_dp);
        u32 pp;
@@ -2264,7 +2428,7 @@ static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
  */
 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
        lockdep_assert_held(&dev_priv->pps_mutex);
 
@@ -2284,7 +2448,7 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
 
 static void edp_panel_on(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        u32 pp;
        i915_reg_t pp_ctrl_reg;
 
@@ -2342,7 +2506,7 @@ void intel_edp_panel_on(struct intel_dp *intel_dp)
 
 static void edp_panel_off(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        u32 pp;
        i915_reg_t pp_ctrl_reg;
 
@@ -2390,7 +2554,7 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
 /* Enable backlight in the panel power control. */
 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        u32 pp;
        i915_reg_t pp_ctrl_reg;
 
@@ -2433,7 +2597,7 @@ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
 /* Disable backlight in the panel power control. */
 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        u32 pp;
        i915_reg_t pp_ctrl_reg;
 
@@ -2864,7 +3028,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
                         uint32_t *DP,
                         uint8_t dp_train_pat)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum port port = intel_dig_port->base.port;
        uint8_t train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
@@ -2946,7 +3110,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
 static void intel_dp_enable_port(struct intel_dp *intel_dp,
                                 const struct intel_crtc_state *old_crtc_state)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
        /* enable with pattern 1 (as per spec) */
 
@@ -3203,7 +3367,7 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
 uint8_t
 intel_dp_voltage_max(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
        enum port port = encoder->port;
 
@@ -3222,7 +3386,7 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
 uint8_t
 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
        enum port port = encoder->port;
 
@@ -3534,7 +3698,7 @@ ivb_cpu_edp_signal_levels(uint8_t train_set)
 void
 intel_dp_set_signal_levels(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum port port = intel_dig_port->base.port;
        uint32_t signal_levels, mask = 0;
@@ -3591,7 +3755,7 @@ intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
 
 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum port port = intel_dig_port->base.port;
        uint32_t val;
@@ -4090,12 +4254,14 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
                int ret = 0;
                int retry;
                bool handled;
+
+               WARN_ON_ONCE(intel_dp->active_mst_links < 0);
                bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
 go_again:
                if (bret == true) {
 
                        /* check link status - esi[10] = 0x200c */
-                       if (intel_dp->active_mst_links &&
+                       if (intel_dp->active_mst_links > 0 &&
                            !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
                                DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
                                intel_dp_start_link_train(intel_dp);
@@ -4160,18 +4326,6 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
        return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
 }
 
-/*
- * If display is now connected check links status,
- * there has been known issues of link loss triggering
- * long pulse.
- *
- * Some sinks (eg. ASUS PB287Q) seem to perform some
- * weird HPD ping pong during modesets. So we can apparently
- * end up with HPD going low during a modeset, and then
- * going back up soon after. And once that happens we must
- * retrain the link to get a picture. That's in case no
- * userspace component reacted to intermittent HPD dip.
- */
 int intel_dp_retrain_link(struct intel_encoder *encoder,
                          struct drm_modeset_acquire_ctx *ctx)
 {
@@ -4294,7 +4448,7 @@ static bool intel_dp_hotplug(struct intel_encoder *encoder,
 static bool
 intel_dp_short_pulse(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        u8 sink_irq_vector = 0;
        u8 old_sink_count = intel_dp->sink_count;
        bool ret;
@@ -4586,10 +4740,205 @@ static bool bxt_digital_port_connected(struct intel_encoder *encoder)
        return I915_READ(GEN8_DE_PORT_ISR) & bit;
 }
 
+static bool icl_combo_port_connected(struct drm_i915_private *dev_priv,
+                                    struct intel_digital_port *intel_dig_port)
+{
+       enum port port = intel_dig_port->base.port;
+
+       return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port);
+}
+
+static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
+                                   struct intel_digital_port *intel_dig_port,
+                                   bool is_legacy, bool is_typec, bool is_tbt)
+{
+       enum port port = intel_dig_port->base.port;
+       enum tc_port_type old_type = intel_dig_port->tc_type;
+       const char *type_str;
+
+       WARN_ON(is_legacy + is_typec + is_tbt != 1);
+
+       if (is_legacy) {
+               intel_dig_port->tc_type = TC_PORT_LEGACY;
+               type_str = "legacy";
+       } else if (is_typec) {
+               intel_dig_port->tc_type = TC_PORT_TYPEC;
+               type_str = "typec";
+       } else if (is_tbt) {
+               intel_dig_port->tc_type = TC_PORT_TBT;
+               type_str = "tbt";
+       } else {
+               return;
+       }
+
+       /* Types are not supposed to be changed at runtime. */
+       WARN_ON(old_type != TC_PORT_UNKNOWN &&
+               old_type != intel_dig_port->tc_type);
+
+       if (old_type != intel_dig_port->tc_type)
+               DRM_DEBUG_KMS("Port %c has TC type %s\n", port_name(port),
+                             type_str);
+}
+
+/*
+ * This function implements the first part of the Connect Flow described by our
+ * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
+ * lanes, EDID, etc.) is done as needed in the typical places.
+ *
+ * Unlike the other ports, type-C ports are not available to use as soon as we
+ * get a hotplug. The type-C PHYs can be shared between multiple controllers:
+ * display, USB, etc. As a result, handshaking through FIA is required around
+ * connect and disconnect to cleanly transfer ownership with the controller and
+ * set the type-C power state.
+ *
+ * We could opt to only do the connect flow when we actually try to use the AUX
+ * channels or do a modeset, then immediately run the disconnect flow after
+ * usage, but that has implications in a dynamic environment: things may go
+ * away or change behind our backs. So for now our driver always tries to
+ * acquire ownership of the controller as soon as it gets an interrupt (or
+ * polls state and sees a port is connected), and only gives it back when it
+ * sees a disconnect. Implementing a more fine-grained model will require a
+ * lot of coordination with user space and thorough testing for the extra
+ * possible cases.
+ */
+static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
+                              struct intel_digital_port *dig_port)
+{
+       enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
+       u32 val;
+
+       if (dig_port->tc_type != TC_PORT_LEGACY &&
+           dig_port->tc_type != TC_PORT_TYPEC)
+               return true;
+
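+       /* The FIA reports per-port DP PHY readiness in DFLEXDPPMS. */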
+       val = I915_READ(PORT_TX_DFLEXDPPMS);
+       if (!(val & DP_PHY_MODE_STATUS_COMPLETED(tc_port))) {
+               DRM_DEBUG_KMS("DP PHY for TC port %d not ready\n", tc_port);
+               return false;
+       }
+
+       /*
+        * This function may be called many times in a row without an HPD event
+        * in between, so try to avoid the write when we can.
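+        * Setting the not-safe bit here takes the PHY out of safe mode.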
+        */
+       val = I915_READ(PORT_TX_DFLEXDPCSSS);
+       if (!(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port))) {
+               val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
+               I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
+       }
+
+       /*
+        * Now we have to re-check the live state, in case the port recently
+        * became disconnected. Not necessary for legacy mode.
+        */
+       if (dig_port->tc_type == TC_PORT_TYPEC &&
+           !(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) {
+               DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port);
+               val = I915_READ(PORT_TX_DFLEXDPCSSS);
+               val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
+               I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
+               return false;
+       }
+
+       return true;
+}
+
+/*
+ * See the comment at the connect function. This implements the Disconnect
+ * Flow.
+ */
+static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
+                                 struct intel_digital_port *dig_port)
+{
+       enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
+       u32 val;
+
+       if (dig_port->tc_type != TC_PORT_LEGACY &&
+           dig_port->tc_type != TC_PORT_TYPEC)
+               return;
+
+       /*
+        * This function may be called many times in a row without an HPD event
+        * in between, so try to avoid the write when we can.
+        */
+       val = I915_READ(PORT_TX_DFLEXDPCSSS);
+       if (val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port)) {
+               val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
+               I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
+       }
+}
+
+/*
+ * The type-C ports are different because even when they are connected, they may
+ * not be available/usable by the graphics driver: see the comment on
+ * icl_tc_phy_connect(). So in our driver, instead of adding the additional
+ * concept of "usable" and making everything check for "connected and
+ * usable", we define a port as "connected" when it is not only connected,
+ * but also usable by the rest of the driver. That maintains the old
+ * assumption that connected ports are usable, and avoids exposing to users
+ * objects they can't really use.
+ */
+static bool icl_tc_port_connected(struct drm_i915_private *dev_priv,
+                                 struct intel_digital_port *intel_dig_port)
+{
+       enum port port = intel_dig_port->base.port;
+       enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+       bool is_legacy, is_typec, is_tbt;
+       u32 dpsp;
+
+       is_legacy = I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port);
+
+       /*
+        * The spec says we shouldn't be using the ISR bits for distinguishing
+        * between TC and TBT. We should use DFLEXDPSP instead.
+        */
+       dpsp = I915_READ(PORT_TX_DFLEXDPSP);
+       is_typec = dpsp & TC_LIVE_STATE_TC(tc_port);
+       is_tbt = dpsp & TC_LIVE_STATE_TBT(tc_port);
+
+       if (!is_legacy && !is_typec && !is_tbt) {
+               icl_tc_phy_disconnect(dev_priv, intel_dig_port);
+               return false;
+       }
+
+       icl_update_tc_port_type(dev_priv, intel_dig_port, is_legacy, is_typec,
+                               is_tbt);
+
+       if (!icl_tc_phy_connect(dev_priv, intel_dig_port))
+               return false;
+
+       return true;
+}
+
+static bool icl_digital_port_connected(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+
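+       /* HPD pins A/B are the combo ports; pins C..F are the Type-C ports. */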
+       switch (encoder->hpd_pin) {
+       case HPD_PORT_A:
+       case HPD_PORT_B:
+               return icl_combo_port_connected(dev_priv, dig_port);
+       case HPD_PORT_C:
+       case HPD_PORT_D:
+       case HPD_PORT_E:
+       case HPD_PORT_F:
+               return icl_tc_port_connected(dev_priv, dig_port);
+       default:
+               MISSING_CASE(encoder->hpd_pin);
+               return false;
+       }
+}
+
 /*
  * intel_digital_port_connected - is the specified port connected?
  * @encoder: intel_encoder
  *
+ * In cases where a connector is physically connected but can't be used by
+ * our hardware, we also return false, since the rest of the driver should
+ * pretty much treat the port as disconnected. This is relevant for type-C
+ * (starting on ICL) where there's ownership involved.
+ *
  * Return %true if port is connected, %false otherwise.
  */
 bool intel_digital_port_connected(struct intel_encoder *encoder)
@@ -4613,8 +4962,10 @@ bool intel_digital_port_connected(struct intel_encoder *encoder)
                return bdw_digital_port_connected(encoder);
        else if (IS_GEN9_LP(dev_priv))
                return bxt_digital_port_connected(encoder);
-       else
+       else if (IS_GEN9_BC(dev_priv) || IS_GEN10(dev_priv))
                return spt_digital_port_connected(encoder);
+       else
+               return icl_digital_port_connected(encoder);
 }
 
 static struct edid *
@@ -4661,7 +5012,8 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
 }
 
 static int
-intel_dp_long_pulse(struct intel_connector *connector)
+intel_dp_long_pulse(struct intel_connector *connector,
+                   struct drm_modeset_acquire_ctx *ctx)
 {
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
@@ -4720,6 +5072,22 @@ intel_dp_long_pulse(struct intel_connector *connector)
                 */
                status = connector_status_disconnected;
                goto out;
+       } else {
+               /*
+                * If the display is now connected, check the link status;
+                * there have been known issues of link loss triggering
+                * long pulses.
+                *
+                * Some sinks (e.g. ASUS PB287Q) seem to perform some
+                * weird HPD ping pong during modesets. So we can apparently
+                * end up with HPD going low during a modeset, and then
+                * going back up soon after. And once that happens we must
+                * retrain the link to get a picture. That's in case no
+                * userspace component reacted to intermittent HPD dip.
+                */
+               struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
+               intel_dp_retrain_link(encoder, ctx);
        }
 
        /*
@@ -4781,7 +5149,7 @@ intel_dp_detect(struct drm_connector *connector,
                                return ret;
                }
 
-               status = intel_dp_long_pulse(intel_dp->attached_connector);
+               status = intel_dp_long_pulse(intel_dp->attached_connector, ctx);
        }
 
        intel_dp->detect_done = false;
@@ -5172,7 +5540,7 @@ static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
 
 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
        lockdep_assert_held(&dev_priv->pps_mutex);
 
@@ -5193,7 +5561,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
 
 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
        enum pipe pipe;
 
@@ -5260,7 +5628,7 @@ enum irqreturn
 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 {
        struct intel_dp *intel_dp = &intel_dig_port->dp;
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        enum irqreturn ret = IRQ_NONE;
 
        if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
@@ -5376,7 +5744,7 @@ static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
 static void
 intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
        struct pps_registers regs;
 
@@ -5444,7 +5812,7 @@ intel_pps_verify_state(struct intel_dp *intel_dp)
 static void
 intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct edp_power_seq cur, vbt, spec,
                *final = &intel_dp->pps_delays;
 
@@ -5537,7 +5905,7 @@ static void
 intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
                                              bool force_disable_vdd)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        u32 pp_on, pp_off, pp_div, port_sel = 0;
        int div = dev_priv->rawclk_freq / 1000;
        struct pps_registers regs;
@@ -5633,7 +6001,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
 
 static void intel_dp_pps_init(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                vlv_initial_power_sequencer_setup(intel_dp);
@@ -5750,7 +6118,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
 void intel_edp_drrs_enable(struct intel_dp *intel_dp,
                           const struct intel_crtc_state *crtc_state)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
        if (!crtc_state->has_drrs) {
                DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
@@ -5785,7 +6153,7 @@ unlock:
 void intel_edp_drrs_disable(struct intel_dp *intel_dp,
                            const struct intel_crtc_state *old_crtc_state)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
        if (!old_crtc_state->has_drrs)
                return;
@@ -6017,8 +6385,8 @@ intel_dp_drrs_init(struct intel_connector *connector,
 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
                                     struct intel_connector *intel_connector)
 {
-       struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       struct drm_device *dev = &dev_priv->drm;
        struct drm_connector *connector = &intel_connector->base;
        struct drm_display_mode *fixed_mode = NULL;
        struct drm_display_mode *downclock_mode = NULL;
index 4da6e33c7fa1c9a06839fc3777f074e66309f096..a9f40985a621209a4d192059eb0474a19748e206 100644
@@ -129,7 +129,8 @@ static bool
 intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
 {
        uint8_t voltage;
-       int voltage_tries, max_vswing_tries;
+       int voltage_tries, cr_tries, max_cr_tries;
+       bool max_vswing_reached = false;
        uint8_t link_config[2];
        uint8_t link_bw, rate_select;
 
@@ -170,9 +171,21 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
                return false;
        }
 
+       /*
+        * The DP 1.4 spec defines the max clock recovery retries value
+        * as 10, but for pre-DP 1.4 devices we set a very tolerant
+        * retry limit of 80 (4 voltage levels x 4 preemphasis levels x
+        * 5 identical voltage retries). Since the previous specs didn't
+        * define a limit, creating the possibility of an infinite loop,
+        * we want to prevent any sink from triggering that corner case.
+        */
+       if (intel_dp->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
+               max_cr_tries = 10;
+       else
+               max_cr_tries = 80;
+
        voltage_tries = 1;
-       max_vswing_tries = 0;
-       for (;;) {
+       for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) {
                uint8_t link_status[DP_LINK_STATUS_SIZE];
 
                drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
@@ -192,7 +205,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
                        return false;
                }
 
-               if (max_vswing_tries == 1) {
+               if (max_vswing_reached) {
                        DRM_DEBUG_KMS("Max Voltage Swing reached\n");
                        return false;
                }
@@ -213,9 +226,11 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
                        voltage_tries = 1;
 
                if (intel_dp_link_max_vswing_reached(intel_dp))
-                       ++max_vswing_tries;
+                       max_vswing_reached = true;
 
        }
+       DRM_ERROR("Failed clock recovery %d times, giving up!\n", max_cr_tries);
+       return false;
 }
 
 /*
index 7e3e01607643d3e6b4dfc6332284be001966e280..77920f1a3da1ac6e6498f047f2b9dfc39015ea23 100644
@@ -166,6 +166,8 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
        struct intel_connector *connector =
                to_intel_connector(old_conn_state->connector);
 
+       intel_ddi_disable_pipe_clock(old_crtc_state);
+
        /* this can fail */
        drm_dp_check_act_status(&intel_dp->mst_mgr);
        /* and this can also fail */
@@ -241,17 +243,16 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
                                       connector->port,
                                       pipe_config->pbn,
                                       pipe_config->dp_m_n.tu);
-       if (ret == false) {
+       if (!ret)
                DRM_ERROR("failed to allocate vcpi\n");
-               return;
-       }
-
 
        intel_dp->active_mst_links++;
        temp = I915_READ(DP_TP_STATUS(port));
        I915_WRITE(DP_TP_STATUS(port), temp);
 
        ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
+
+       intel_ddi_enable_pipe_clock(pipe_config);
 }
 
 static void intel_mst_enable_dp(struct intel_encoder *encoder,
@@ -263,7 +264,6 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum port port = intel_dig_port->base.port;
-       int ret;
 
        DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
 
@@ -274,9 +274,9 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
                                    1))
                DRM_ERROR("Timed out waiting for ACT sent\n");
 
-       ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
+       drm_dp_check_act_status(&intel_dp->mst_mgr);
 
-       ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr);
+       drm_dp_update_payload_part2(&intel_dp->mst_mgr);
        if (pipe_config->has_audio)
                intel_audio_codec_enable(encoder, pipe_config, conn_state);
 }
index b51ad2917dbef4528d9a7c528e603f9602cd39fd..e6cac9225536a6ce39d44d6f898e6577b042dba0 100644
@@ -2212,6 +2212,20 @@ static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
        params->dco_fraction = dco & 0x7fff;
 }
 
+int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv)
+{
+       int ref_clock = dev_priv->cdclk.hw.ref;
+
+       /*
+        * For ICL+, the spec states: if reference frequency is 38.4,
+        * use 19.2 because the DPLL automatically divides that by 2.
+        */
+       if (INTEL_GEN(dev_priv) >= 11 && ref_clock == 38400)
+               ref_clock = 19200;
+
+       return ref_clock;
+}
+
 static bool
 cnl_ddi_calculate_wrpll(int clock,
                        struct drm_i915_private *dev_priv,
@@ -2251,14 +2265,7 @@ cnl_ddi_calculate_wrpll(int clock,
 
        cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
 
-       ref_clock = dev_priv->cdclk.hw.ref;
-
-       /*
-        * For ICL, the spec states: if reference frequency is 38.4, use 19.2
-        * because the DPLL automatically divides that by 2.
-        */
-       if (IS_ICELAKE(dev_priv) && ref_clock == 38400)
-               ref_clock = 19200;
+       ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
 
        cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock, pdiv, qdiv,
                                  kdiv);
@@ -2452,6 +2459,16 @@ static const struct skl_wrpll_params icl_dp_combo_pll_19_2MHz_values[] = {
          .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
 };
 
+static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
+       .dco_integer = 0x151, .dco_fraction = 0x4000,
+       .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
+};
+
+static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
+       .dco_integer = 0x1A5, .dco_fraction = 0x7000,
+       .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
+};
+
 static bool icl_calc_dp_combo_pll(struct drm_i915_private *dev_priv, int clock,
                                  struct skl_wrpll_params *pll_params)
 {
@@ -2494,6 +2511,14 @@ static bool icl_calc_dp_combo_pll(struct drm_i915_private *dev_priv, int clock,
        return true;
 }
 
+static bool icl_calc_tbt_pll(struct drm_i915_private *dev_priv, int clock,
+                            struct skl_wrpll_params *pll_params)
+{
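+       /* The TBT PLL parameters are fixed; only the reference clock matters. */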
+       *pll_params = dev_priv->cdclk.hw.ref == 24000 ?
+                       icl_tbt_pll_24MHz_values : icl_tbt_pll_19_2MHz_values;
+       return true;
+}
+
 static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
                                struct intel_encoder *encoder, int clock,
                                struct intel_dpll_hw_state *pll_state)
@@ -2503,7 +2528,9 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
        struct skl_wrpll_params pll_params = { 0 };
        bool ret;
 
-       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+       if (intel_port_is_tc(dev_priv, encoder->port))
+               ret = icl_calc_tbt_pll(dev_priv, clock, &pll_params);
+       else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
                ret = cnl_ddi_calculate_wrpll(clock, dev_priv, &pll_params);
        else
                ret = icl_calc_dp_combo_pll(dev_priv, clock, &pll_params);
@@ -2623,7 +2650,8 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
 
                for (div2 = 10; div2 > 0; div2--) {
                        int dco = div1 * div2 * clock_khz * 5;
-                       int a_divratio, tlinedrv, inputsel, hsdiv;
+                       int a_divratio, tlinedrv, inputsel;
+                       u32 hsdiv;
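+                       /* hsdiv holds the HSCLKCTL register field encoding directly. */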
 
                        if (dco < dco_min_freq || dco > dco_max_freq)
                                continue;
@@ -2642,16 +2670,16 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
                                MISSING_CASE(div1);
                                /* fall through */
                        case 2:
-                               hsdiv = 0;
+                               hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
                                break;
                        case 3:
-                               hsdiv = 1;
+                               hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
                                break;
                        case 5:
-                               hsdiv = 2;
+                               hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
                                break;
                        case 7:
-                               hsdiv = 3;
+                               hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
                                break;
                        }
 
@@ -2665,7 +2693,7 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
                        state->mg_clktop2_hsclkctl =
                                MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
                                MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
-                               MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO(hsdiv) |
+                               hsdiv |
                                MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
 
                        return true;
@@ -2846,6 +2874,8 @@ static struct intel_shared_dpll *
 icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
             struct intel_encoder *encoder)
 {
+       struct intel_digital_port *intel_dig_port =
+                       enc_to_dig_port(&encoder->base);
        struct intel_shared_dpll *pll;
        struct intel_dpll_hw_state pll_state = {};
        enum port port = encoder->port;
@@ -2865,7 +2895,7 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
        case PORT_D:
        case PORT_E:
        case PORT_F:
-               if (0 /* TODO: TBT PLLs */) {
+               if (intel_dig_port->tc_type == TC_PORT_TBT) {
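+                       /* All Type-C ports in TBT mode share the single TBT PLL. */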
                        min = DPLL_ID_ICL_TBTPLL;
                        max = min;
                        ret = icl_calc_dpll_state(crtc_state, encoder, clock,
index 7e522cf4f13f3bb35991a8771cbd3e4755a902cb..bf0de8a4dc6378c9bd6de5432449ec1afac10bd1 100644
@@ -344,5 +344,6 @@ void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
                              struct intel_dpll_hw_state *hw_state);
 int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
                               uint32_t pll_id);
+int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv);
 
 #endif /* _INTEL_DPLL_MGR_H_ */
index 5f63e1a9c25b9c31331ad87edb3ee5a5466d434f..f5731215210a409c29636a33c9df07913781e63f 100644
@@ -972,9 +972,8 @@ struct intel_plane {
        void (*disable_plane)(struct intel_plane *plane,
                              struct intel_crtc *crtc);
        bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
-       int (*check_plane)(struct intel_plane *plane,
-                          struct intel_crtc_state *crtc_state,
-                          struct intel_plane_state *state);
+       int (*check_plane)(struct intel_crtc_state *crtc_state,
+                          struct intel_plane_state *plane_state);
 };
 
 struct intel_watermark_params {
@@ -1168,6 +1167,7 @@ struct intel_digital_port {
        bool release_cl2_override;
        uint8_t max_lanes;
        enum intel_display_power_domain ddi_io_power_domain;
+       enum tc_port_type tc_type;
 
        void (*write_infoframe)(struct drm_encoder *encoder,
                                const struct intel_crtc_state *crtc_state,
@@ -1314,6 +1314,12 @@ dp_to_lspcon(struct intel_dp *intel_dp)
        return &dp_to_dig_port(intel_dp)->lspcon;
 }
 
+static inline struct drm_i915_private *
+dp_to_i915(struct intel_dp *intel_dp)
+{
+       return to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
+}
+
 static inline struct intel_digital_port *
 hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
 {
@@ -1717,6 +1723,9 @@ void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
                               unsigned int frontbuffer_bits);
 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
                          unsigned int frontbuffer_bits);
+void icl_program_mg_dp_mode(struct intel_dp *intel_dp);
+void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port);
+void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port);
 
 void
 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
@@ -1930,6 +1939,9 @@ void intel_psr_enable(struct intel_dp *intel_dp,
                      const struct intel_crtc_state *crtc_state);
 void intel_psr_disable(struct intel_dp *intel_dp,
                      const struct intel_crtc_state *old_crtc_state);
+int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv,
+                              struct drm_modeset_acquire_ctx *ctx,
+                              u64 value);
 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
                          unsigned frontbuffer_bits,
                          enum fb_op_origin origin);
@@ -1939,20 +1951,33 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
 void intel_psr_init(struct drm_i915_private *dev_priv);
 void intel_psr_compute_config(struct intel_dp *intel_dp,
                              struct intel_crtc_state *crtc_state);
-void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug);
+void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug);
 void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
 void intel_psr_short_pulse(struct intel_dp *intel_dp);
-int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state);
+int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
+                           u32 *out_value);
 
 /* intel_runtime_pm.c */
 int intel_power_domains_init(struct drm_i915_private *);
-void intel_power_domains_fini(struct drm_i915_private *);
+void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
-void intel_power_domains_suspend(struct drm_i915_private *dev_priv);
-void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
+void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv);
+void intel_power_domains_enable(struct drm_i915_private *dev_priv);
+void intel_power_domains_disable(struct drm_i915_private *dev_priv);
+
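+/* Target system state for intel_power_domains_suspend(). */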
+enum i915_drm_suspend_mode {
+       I915_DRM_SUSPEND_IDLE,
+       I915_DRM_SUSPEND_MEM,
+       I915_DRM_SUSPEND_HIBERNATE,
+};
+
+void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
+                                enum i915_drm_suspend_mode);
+void intel_power_domains_resume(struct drm_i915_private *dev_priv);
 void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
 void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_disable(struct drm_i915_private *dev_priv);
 const char *
 intel_display_power_domain_str(enum intel_display_power_domain domain);
 
@@ -2030,8 +2055,6 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
 
-void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
-
 void chv_phy_powergate_lanes(struct intel_encoder *encoder,
                             bool override, unsigned int mask);
 bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
index 2d1952849d69ffec45b64360cc7d1baf78e0b0ba..10cd051ba29eebfc5d14c1ca7779784e4705aa73 100644
@@ -513,7 +513,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine,
                goto err_unref;
        }
 
-       ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH);
+       ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
        if (ret)
                goto err_unref;
 
@@ -527,36 +527,19 @@ err_unref:
 
 void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
 {
-       i915_vma_unpin_and_release(&engine->scratch);
-}
-
-static void cleanup_phys_status_page(struct intel_engine_cs *engine)
-{
-       struct drm_i915_private *dev_priv = engine->i915;
-
-       if (!dev_priv->status_page_dmah)
-               return;
-
-       drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
-       engine->status_page.page_addr = NULL;
+       i915_vma_unpin_and_release(&engine->scratch, 0);
 }
 
 static void cleanup_status_page(struct intel_engine_cs *engine)
 {
-       struct i915_vma *vma;
-       struct drm_i915_gem_object *obj;
-
-       vma = fetch_and_zero(&engine->status_page.vma);
-       if (!vma)
-               return;
-
-       obj = vma->obj;
+       if (HWS_NEEDS_PHYSICAL(engine->i915)) {
+               void *addr = fetch_and_zero(&engine->status_page.page_addr);
 
-       i915_vma_unpin(vma);
-       i915_vma_close(vma);
+               __free_page(virt_to_page(addr));
+       }
 
-       i915_gem_object_unpin_map(obj);
-       __i915_gem_object_release_unless_active(obj);
+       i915_vma_unpin_and_release(&engine->status_page.vma,
+                                  I915_VMA_RELEASE_MAP);
 }
 
 static int init_status_page(struct intel_engine_cs *engine)
@@ -598,7 +581,7 @@ static int init_status_page(struct intel_engine_cs *engine)
                flags |= PIN_MAPPABLE;
        else
                flags |= PIN_HIGH;
-       ret = i915_vma_pin(vma, 0, 4096, flags);
+       ret = i915_vma_pin(vma, 0, 0, flags);
        if (ret)
                goto err;
 
@@ -622,17 +605,18 @@ err:
 
 static int init_phys_status_page(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = engine->i915;
+       struct page *page;
 
-       GEM_BUG_ON(engine->id != RCS);
-
-       dev_priv->status_page_dmah =
-               drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
-       if (!dev_priv->status_page_dmah)
+       /*
+        * Though the HWS register does support 36-bit addresses, historically
+        * we have had hangs and corruption reported due to wild writes if
+        * the HWS is placed above 4G.
+        */
+       page = alloc_page(GFP_KERNEL | __GFP_DMA32 | __GFP_ZERO);
+       if (!page)
                return -ENOMEM;
 
-       engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
-       memset(engine->status_page.page_addr, 0, PAGE_SIZE);
+       engine->status_page.page_addr = page_address(page);
 
        return 0;
 }
@@ -722,10 +706,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 
        intel_engine_cleanup_scratch(engine);
 
-       if (HWS_NEEDS_PHYSICAL(engine->i915))
-               cleanup_phys_status_page(engine);
-       else
-               cleanup_status_page(engine);
+       cleanup_status_page(engine);
 
        intel_engine_fini_breadcrumbs(engine);
        intel_engine_cleanup_cmd_parser(engine);
@@ -800,6 +781,16 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine)
        return err;
 }
 
+void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+
+       GEM_TRACE("%s\n", engine->name);
+
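+       /* Undo the STOP_RING request made by intel_engine_stop_cs(). */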
+       I915_WRITE_FW(RING_MI_MODE(engine->mmio_base),
+                     _MASKED_BIT_DISABLE(STOP_RING));
+}
+
 const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
 {
        switch (type) {
@@ -980,8 +971,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
                return true;
 
        /* Any inflight/incomplete requests? */
-       if (!i915_seqno_passed(intel_engine_get_seqno(engine),
-                              intel_engine_last_submit(engine)))
+       if (!intel_engine_signaled(engine, intel_engine_last_submit(engine)))
                return false;
 
        if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock))
@@ -1348,20 +1338,19 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
 
        if (HAS_EXECLISTS(dev_priv)) {
                const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
-               u32 ptr, read, write;
                unsigned int idx;
+               u8 read, write;
 
                drm_printf(m, "\tExeclist status: 0x%08x %08x\n",
                           I915_READ(RING_EXECLIST_STATUS_LO(engine)),
                           I915_READ(RING_EXECLIST_STATUS_HI(engine)));
 
-               ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
-               read = GEN8_CSB_READ_PTR(ptr);
-               write = GEN8_CSB_WRITE_PTR(ptr);
-               drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], tasklet queued? %s (%s)\n",
-                          read, execlists->csb_head,
-                          write,
-                          intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
+               read = execlists->csb_head;
+               write = READ_ONCE(*execlists->csb_write);
+
+               drm_printf(m, "\tExeclist CSB read %d, write %d [mmio:%d], tasklet queued? %s (%s)\n",
+                          read, write,
+                          GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine))),
                           yesno(test_bit(TASKLET_STATE_SCHED,
                                          &engine->execlists.tasklet.state)),
                           enableddisabled(!atomic_read(&engine->execlists.tasklet.count)));
@@ -1373,12 +1362,12 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
                        write += GEN8_CSB_ENTRIES;
                while (read < write) {
                        idx = ++read % GEN8_CSB_ENTRIES;
-                       drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [0x%08x in hwsp], context: %d [%d in hwsp]\n",
+                       drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [mmio:0x%08x], context: %d [mmio:%d]\n",
                                   idx,
-                                  I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
                                   hws[idx * 2],
-                                  I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)),
-                                  hws[idx * 2 + 1]);
+                                  I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
+                                  hws[idx * 2 + 1],
+                                  I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)));
                }
 
                rcu_read_lock();
index 560c7406ae406e5df1d2b083024ea7ae983db4e8..230aea69385d4fb5e682d7991e5bc252a7012b6d 100644
@@ -27,8 +27,6 @@
 #include "intel_guc_submission.h"
 #include "i915_drv.h"
 
-static void guc_init_ggtt_pin_bias(struct intel_guc *guc);
-
 static void gen8_guc_raise_irq(struct intel_guc *guc)
 {
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -128,13 +126,15 @@ static int guc_init_wq(struct intel_guc *guc)
 
 static void guc_fini_wq(struct intel_guc *guc)
 {
-       struct drm_i915_private *dev_priv = guc_to_i915(guc);
+       struct workqueue_struct *wq;
 
-       if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
-           USES_GUC_SUBMISSION(dev_priv))
-               destroy_workqueue(guc->preempt_wq);
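+       /* Destroy only the workqueues that were actually created. */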
+       wq = fetch_and_zero(&guc->preempt_wq);
+       if (wq)
+               destroy_workqueue(wq);
 
-       destroy_workqueue(guc->log.relay.flush_wq);
+       wq = fetch_and_zero(&guc->log.relay.flush_wq);
+       if (wq)
+               destroy_workqueue(wq);
 }
 
 int intel_guc_init_misc(struct intel_guc *guc)
@@ -142,8 +142,6 @@ int intel_guc_init_misc(struct intel_guc *guc)
        struct drm_i915_private *i915 = guc_to_i915(guc);
        int ret;
 
-       guc_init_ggtt_pin_bias(guc);
-
        ret = guc_init_wq(guc);
        if (ret)
                return ret;
@@ -170,7 +168,7 @@ static int guc_shared_data_create(struct intel_guc *guc)
 
        vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
        if (IS_ERR(vaddr)) {
-               i915_vma_unpin_and_release(&vma);
+               i915_vma_unpin_and_release(&vma, 0);
                return PTR_ERR(vaddr);
        }
 
@@ -182,8 +180,7 @@ static int guc_shared_data_create(struct intel_guc *guc)
 
 static void guc_shared_data_destroy(struct intel_guc *guc)
 {
-       i915_gem_object_unpin_map(guc->shared_data->obj);
-       i915_vma_unpin_and_release(&guc->shared_data);
+       i915_vma_unpin_and_release(&guc->shared_data, I915_VMA_RELEASE_MAP);
 }
 
 int intel_guc_init(struct intel_guc *guc)
@@ -584,52 +581,27 @@ int intel_guc_resume(struct intel_guc *guc)
  *
  * ::
  *
- *     +==============> +====================+ <== GUC_GGTT_TOP
- *     ^                |                    |
- *     |                |                    |
- *     |                |        DRAM        |
- *     |                |       Memory       |
- *     |                |                    |
- *    GuC               |                    |
- *  Address  +========> +====================+ <== WOPCM Top
- *   Space   ^          |   HW contexts RSVD |
- *     |     |          |        WOPCM       |
- *     |     |     +==> +--------------------+ <== GuC WOPCM Top
- *     |    GuC    ^    |                    |
- *     |    GGTT   |    |                    |
- *     |    Pin   GuC   |        GuC         |
- *     |    Bias WOPCM  |       WOPCM        |
- *     |     |    Size  |                    |
- *     |     |     |    |                    |
- *     v     v     v    |                    |
- *     +=====+=====+==> +====================+ <== GuC WOPCM Base
- *                      |   Non-GuC WOPCM    |
- *                      |   (HuC/Reserved)   |
- *                      +====================+ <== WOPCM Base
+ *     +===========> +====================+ <== FFFF_FFFF
+ *     ^             |      Reserved      |
+ *     |             +====================+ <== GUC_GGTT_TOP
+ *     |             |                    |
+ *     |             |        DRAM        |
+ *    GuC            |                    |
+ *  Address    +===> +====================+ <== GuC ggtt_pin_bias
+ *   Space     ^     |                    |
+ *     |       |     |                    |
+ *     |      GuC    |        GuC         |
+ *     |     WOPCM   |       WOPCM        |
+ *     |      Size   |                    |
+ *     |       |     |                    |
+ *     v       v     |                    |
+ *     +=======+===> +====================+ <== 0000_0000
  *
- * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to WOPCM
+ * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to GuC WOPCM
  * while the upper part of GuC Address Space [ggtt_pin_bias, GUC_GGTT_TOP) is mapped
- * to DRAM. The value of the GuC ggtt_pin_bias is determined by WOPCM size and
- * actual GuC WOPCM size.
+ * to DRAM. The value of the GuC ggtt_pin_bias is the GuC WOPCM size.
  */
 
-/**
- * guc_init_ggtt_pin_bias() - Initialize the GuC ggtt_pin_bias value.
- * @guc: intel_guc structure.
- *
- * This function will calculate and initialize the ggtt_pin_bias value based on
- * overall WOPCM size and GuC WOPCM size.
- */
-static void guc_init_ggtt_pin_bias(struct intel_guc *guc)
-{
-       struct drm_i915_private *i915 = guc_to_i915(guc);
-
-       GEM_BUG_ON(!i915->wopcm.size);
-       GEM_BUG_ON(i915->wopcm.size < i915->wopcm.guc.base);
-
-       guc->ggtt_pin_bias = i915->wopcm.size - i915->wopcm.guc.base;
-}
-
 /**
  * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
  * @guc:       the guc
@@ -648,6 +620,7 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
+       u64 flags;
        int ret;
 
        obj = i915_gem_object_create(dev_priv, size);
@@ -658,8 +631,8 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
        if (IS_ERR(vma))
                goto err;
 
-       ret = i915_vma_pin(vma, 0, PAGE_SIZE,
-                          PIN_GLOBAL | PIN_OFFSET_BIAS | guc->ggtt_pin_bias);
+       flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
+       ret = i915_vma_pin(vma, 0, 0, flags);
        if (ret) {
                vma = ERR_PTR(ret);
                goto err;
@@ -671,3 +644,20 @@ err:
        i915_gem_object_put(obj);
        return vma;
 }
+
+/**
+ * intel_guc_reserved_gtt_size() - size of the GGTT shadowed by the GuC WOPCM
+ * @guc:       intel_guc structure
+ *
+ * The GuC WOPCM mapping shadows the lower part of the GGTT, so if we are using
+ * GuC we can't have any objects pinned in that region. This function returns
+ * the size of the shadowed region.
+ *
+ * Returns:
+ * 0 if GuC is not present or not in use.
+ * Otherwise, the GuC WOPCM size.
+ */
+u32 intel_guc_reserved_gtt_size(struct intel_guc *guc)
+{
+       return guc_to_i915(guc)->wopcm.guc.size;
+}
index 4121928a495e0ccfc4af79aeb27430f26dd15959..ad42faf48c46a3f3773ce6eae1c9702b8379c041 100644
@@ -49,9 +49,6 @@ struct intel_guc {
        struct intel_guc_log log;
        struct intel_guc_ct ct;
 
-       /* Offset where Non-WOPCM memory starts. */
-       u32 ggtt_pin_bias;
-
        /* Log snapshot if GuC errors during load */
        struct drm_i915_gem_object *load_err_log;
 
@@ -130,10 +127,10 @@ static inline void intel_guc_to_host_event_handler(struct intel_guc *guc)
  * @vma: i915 graphics virtual memory area.
  *
  * GuC does not allow any gfx GGTT address that falls into range
- * [0, GuC ggtt_pin_bias), which is reserved for Boot ROM, SRAM and WOPCM.
- * Currently, in order to exclude [0, GuC ggtt_pin_bias) address space from
+ * [0, ggtt.pin_bias), which is reserved for Boot ROM, SRAM and WOPCM.
+ * Currently, in order to exclude [0, ggtt.pin_bias) address space from
  * GGTT, all gfx objects used by GuC are allocated with intel_guc_allocate_vma()
- * and pinned with PIN_OFFSET_BIAS along with the value of GuC ggtt_pin_bias.
+ * and pinned with PIN_OFFSET_BIAS along with the value of ggtt.pin_bias.
  *
  * Return: GGTT offset of the @vma.
  */
@@ -142,7 +139,7 @@ static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
 {
        u32 offset = i915_ggtt_offset(vma);
 
-       GEM_BUG_ON(offset < guc->ggtt_pin_bias);
+       GEM_BUG_ON(offset < i915_ggtt_pin_bias(vma));
        GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));
 
        return offset;
@@ -168,6 +165,7 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
 int intel_guc_suspend(struct intel_guc *guc);
 int intel_guc_resume(struct intel_guc *guc);
 struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
+u32 intel_guc_reserved_gtt_size(struct intel_guc *guc);
 
 static inline int intel_guc_sanitize(struct intel_guc *guc)
 {
index dcaa3fb71765e2f6f2a7b02b00768dfe97ea3c9a..f0db62887f5098f3a72433b6f838cb0e2744babd 100644
@@ -148,5 +148,5 @@ int intel_guc_ads_create(struct intel_guc *guc)
 
 void intel_guc_ads_destroy(struct intel_guc *guc)
 {
-       i915_vma_unpin_and_release(&guc->ads_vma);
+       i915_vma_unpin_and_release(&guc->ads_vma, 0);
 }
index 371b6005954aab09b69c15704343153c73e8d5a5..a52883e9146f273237bf9c259c3bcfa6f224a3ea 100644
@@ -204,7 +204,7 @@ static int ctch_init(struct intel_guc *guc,
        return 0;
 
 err_vma:
-       i915_vma_unpin_and_release(&ctch->vma);
+       i915_vma_unpin_and_release(&ctch->vma, 0);
 err_out:
        CT_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n",
                        ctch->owner, err);
@@ -214,10 +214,7 @@ err_out:
 static void ctch_fini(struct intel_guc *guc,
                      struct intel_guc_ct_channel *ctch)
 {
-       GEM_BUG_ON(!ctch->vma);
-
-       i915_gem_object_unpin_map(ctch->vma->obj);
-       i915_vma_unpin_and_release(&ctch->vma);
+       i915_vma_unpin_and_release(&ctch->vma, I915_VMA_RELEASE_MAP);
 }
 
 static int ctch_open(struct intel_guc *guc,
index 1a0f2a39cef9b892fc4e78cdba85fa1d29e9234f..8382d591c7842bb3f292fb3e62558c57b3b6a135 100644
@@ -49,6 +49,7 @@
 #define   WQ_TYPE_BATCH_BUF            (0x1 << WQ_TYPE_SHIFT)
 #define   WQ_TYPE_PSEUDO               (0x2 << WQ_TYPE_SHIFT)
 #define   WQ_TYPE_INORDER              (0x3 << WQ_TYPE_SHIFT)
+#define   WQ_TYPE_NOOP                 (0x4 << WQ_TYPE_SHIFT)
 #define WQ_TARGET_SHIFT                        10
 #define WQ_LEN_SHIFT                   16
 #define WQ_NO_WCFLUSH_WAIT             (1 << 27)
index 6da61a71d28f69835c589bbdf5acc48f19b69197..d3ebdbc0182e745b0d94611b862a6f03afbb8550 100644
@@ -498,7 +498,7 @@ err:
 
 void intel_guc_log_destroy(struct intel_guc_log *log)
 {
-       i915_vma_unpin_and_release(&log->vma);
+       i915_vma_unpin_and_release(&log->vma, 0);
 }
 
 int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
index 4aa5e6463e7b70d47fa459b072e6d93e670f9b25..07b9d313b019acb7635ff798d34c911ccb468b0a 100644
@@ -317,7 +317,7 @@ static int guc_stage_desc_pool_create(struct intel_guc *guc)
 
        vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
        if (IS_ERR(vaddr)) {
-               i915_vma_unpin_and_release(&vma);
+               i915_vma_unpin_and_release(&vma, 0);
                return PTR_ERR(vaddr);
        }
 
@@ -331,8 +331,7 @@ static int guc_stage_desc_pool_create(struct intel_guc *guc)
 static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
 {
        ida_destroy(&guc->stage_ids);
-       i915_gem_object_unpin_map(guc->stage_desc_pool->obj);
-       i915_vma_unpin_and_release(&guc->stage_desc_pool);
+       i915_vma_unpin_and_release(&guc->stage_desc_pool, I915_VMA_RELEASE_MAP);
 }
 
 /*
@@ -457,6 +456,9 @@ static void guc_wq_item_append(struct intel_guc_client *client,
         */
        BUILD_BUG_ON(wqi_size != 16);
 
+       /* We expect the WQ to be active if we're appending items to it */
+       GEM_BUG_ON(desc->wq_status != WQ_STATUS_ACTIVE);
+
        /* Free space is guaranteed. */
        wq_off = READ_ONCE(desc->tail);
        GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head),
@@ -466,15 +468,19 @@ static void guc_wq_item_append(struct intel_guc_client *client,
        /* WQ starts from the page after doorbell / process_desc */
        wqi = client->vaddr + wq_off + GUC_DB_SIZE;
 
-       /* Now fill in the 4-word work queue item */
-       wqi->header = WQ_TYPE_INORDER |
-                     (wqi_len << WQ_LEN_SHIFT) |
-                     (target_engine << WQ_TARGET_SHIFT) |
-                     WQ_NO_WCFLUSH_WAIT;
-       wqi->context_desc = context_desc;
-       wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
-       GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
-       wqi->fence_id = fence_id;
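+       /* Selftests may request NOOP items to exercise the WQ machinery. */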
+       if (I915_SELFTEST_ONLY(client->use_nop_wqi)) {
+               wqi->header = WQ_TYPE_NOOP | (wqi_len << WQ_LEN_SHIFT);
+       } else {
+               /* Now fill in the 4-word work queue item */
+               wqi->header = WQ_TYPE_INORDER |
+                             (wqi_len << WQ_LEN_SHIFT) |
+                             (target_engine << WQ_TARGET_SHIFT) |
+                             WQ_NO_WCFLUSH_WAIT;
+               wqi->context_desc = context_desc;
+               wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
+               GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
+               wqi->fence_id = fence_id;
+       }
 
        /* Make the update visible to GuC */
        WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
@@ -1008,7 +1014,7 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
 err_vaddr:
        i915_gem_object_unpin_map(client->vma->obj);
 err_vma:
-       i915_vma_unpin_and_release(&client->vma);
+       i915_vma_unpin_and_release(&client->vma, 0);
 err_id:
        ida_simple_remove(&guc->stage_ids, client->stage_id);
 err_client:
@@ -1020,8 +1026,7 @@ static void guc_client_free(struct intel_guc_client *client)
 {
        unreserve_doorbell(client);
        guc_stage_desc_fini(client->guc, client);
-       i915_gem_object_unpin_map(client->vma->obj);
-       i915_vma_unpin_and_release(&client->vma);
+       i915_vma_unpin_and_release(&client->vma, I915_VMA_RELEASE_MAP);
        ida_simple_remove(&client->guc->stage_ids, client->stage_id);
        kfree(client);
 }
index fb081cefef935da19f1618d79b778f945dda24dc..169c54568340e51da5cf9cd95c20a595fbb14f4f 100644
@@ -28,6 +28,7 @@
 #include <linux/spinlock.h>
 
 #include "i915_gem.h"
+#include "i915_selftest.h"
 
 struct drm_i915_private;
 
@@ -71,6 +72,9 @@ struct intel_guc_client {
        spinlock_t wq_lock;
        /* Per-engine counts of GuC submissions */
        u64 submissions[I915_NUM_ENGINES];
+
+       /* For testing purposes, use nop WQ items instead of real ones */
+       I915_SELFTEST_DECLARE(bool use_nop_wqi);
 };
 
 int intel_guc_submission_init(struct intel_guc *guc);
index 2fc7a0dd0df9b2bc7a88814f75d98334bfd4d4e9..e26d05a46451fe7f604474aa044e8f1307022177 100644
@@ -142,7 +142,7 @@ static int semaphore_passed(struct intel_engine_cs *engine)
        if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
                return -1;
 
-       if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
+       if (intel_engine_signaled(signaller, seqno))
                return 1;
 
        /* cursory check for an unkickable deadlock */
index 0cc6a861bcf83ece9f4a317dd9581dd9741ecf81..26e48fc95543244bb163f5f0f7d5370bda11ed59 100644
@@ -57,9 +57,9 @@ static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
 
        /* PG1 (power well #1) needs to be enabled */
        for_each_power_well(dev_priv, power_well) {
-               if (power_well->id == id) {
-                       enabled = power_well->ops->is_enabled(dev_priv,
-                                                             power_well);
+               if (power_well->desc->id == id) {
+                       enabled = power_well->desc->ops->is_enabled(dev_priv,
+                                                                   power_well);
                        break;
                }
        }
index 192972a7d287e9fd5ff9aeb599d6730007383500..a2dab0b6bde6a6a1034b8dcc76c27e03b1004f27 100644
@@ -1911,22 +1911,26 @@ intel_hdmi_set_edid(struct drm_connector *connector)
 static enum drm_connector_status
 intel_hdmi_detect(struct drm_connector *connector, bool force)
 {
-       enum drm_connector_status status;
+       enum drm_connector_status status = connector_status_disconnected;
        struct drm_i915_private *dev_priv = to_i915(connector->dev);
        struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+       struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base;
 
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                      connector->base.id, connector->name);
 
        intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
 
+       if (IS_ICELAKE(dev_priv) &&
+           !intel_digital_port_connected(encoder))
+               goto out;
+
        intel_hdmi_unset_edid(connector);
 
        if (intel_hdmi_set_edid(connector))
                status = connector_status_connected;
-       else
-               status = connector_status_disconnected;
 
+out:
        intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
 
        if (status != connector_status_connected)
index ffcad5fad6a7b4e6a24b1e34d53964caaeebfaae..37ef540dd280a2986942d4fb8688de39c4cc8e66 100644
@@ -63,7 +63,7 @@ int intel_huc_auth(struct intel_huc *huc)
                return -ENOEXEC;
 
        vma = i915_gem_object_ggtt_pin(huc->fw.obj, NULL, 0, 0,
-                                      PIN_OFFSET_BIAS | guc->ggtt_pin_bias);
+                                      PIN_OFFSET_BIAS | i915->ggtt.pin_bias);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                DRM_ERROR("HuC: Failed to pin huc fw object %d\n", ret);
index bef32b7c248e0bdbe42eec64f36e0e3c64e5e5a7..33d87ab93fdd4bd81a221daf9a039e84510d592d 100644
@@ -37,7 +37,7 @@
 
 struct gmbus_pin {
        const char *name;
-       i915_reg_t reg;
+       enum i915_gpio gpio;
 };
 
 /* Map gmbus pin pairs to names and registers. */
@@ -121,8 +121,7 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
        else
                size = ARRAY_SIZE(gmbus_pins);
 
-       return pin < size &&
-               i915_mmio_reg_valid(get_gmbus_pin(dev_priv, pin)->reg);
+       return pin < size && get_gmbus_pin(dev_priv, pin)->name;
 }
 
 /* Intel GPIO access functions */
@@ -292,8 +291,7 @@ intel_gpio_setup(struct intel_gmbus *bus, unsigned int pin)
 
        algo = &bus->bit_algo;
 
-       bus->gpio_reg = _MMIO(dev_priv->gpio_mmio_base +
-                             i915_mmio_reg_offset(get_gmbus_pin(dev_priv, pin)->reg));
+       bus->gpio_reg = GPIO(get_gmbus_pin(dev_priv, pin)->gpio);
        bus->adapter.algo_data = algo;
        algo->setsda = set_data;
        algo->setscl = set_clock;
@@ -825,9 +823,11 @@ int intel_setup_gmbus(struct drm_i915_private *dev_priv)
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
        else if (!HAS_GMCH_DISPLAY(dev_priv))
-               dev_priv->gpio_mmio_base =
-                       i915_mmio_reg_offset(PCH_GPIOA) -
-                       i915_mmio_reg_offset(GPIOA);
+               /*
+                * Broxton uses the same PCH offsets for South Display Engine,
+                * even though it doesn't have a PCH.
+                */
+               dev_priv->gpio_mmio_base = PCH_DISPLAY_BASE;
 
        mutex_init(&dev_priv->gmbus_mutex);
        init_waitqueue_head(&dev_priv->gmbus_wait_queue);
index 174479232e94312bc232a7792ded191e960fee49..9b1f0e5211a06a17551897f2088a5cbfe0aa1f3b 100644
@@ -541,11 +541,6 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
 
        GEM_BUG_ON(execlists->preempt_complete_status !=
                   upper_32_bits(ce->lrc_desc));
-       GEM_BUG_ON((ce->lrc_reg_state[CTX_CONTEXT_CONTROL + 1] &
-                   _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
-                                      CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT)) !=
-                  _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
-                                     CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT));
 
        /*
         * Switch to our empty preempt context so
@@ -1277,6 +1272,8 @@ static void execlists_context_destroy(struct intel_context *ce)
 
 static void execlists_context_unpin(struct intel_context *ce)
 {
+       i915_gem_context_unpin_hw_id(ce->gem_context);
+
        intel_ring_unpin(ce->ring);
 
        ce->state->obj->pin_global--;
@@ -1303,10 +1300,9 @@ static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
        }
 
        flags = PIN_GLOBAL | PIN_HIGH;
-       if (ctx->ggtt_offset_bias)
-               flags |= PIN_OFFSET_BIAS | ctx->ggtt_offset_bias;
+       flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
 
-       return i915_vma_pin(vma, 0, GEN8_LR_CONTEXT_ALIGN, flags);
+       return i915_vma_pin(vma, 0, 0, flags);
 }
 
 static struct intel_context *
@@ -1332,10 +1328,14 @@ __execlists_context_pin(struct intel_engine_cs *engine,
                goto unpin_vma;
        }
 
-       ret = intel_ring_pin(ce->ring, ctx->i915, ctx->ggtt_offset_bias);
+       ret = intel_ring_pin(ce->ring);
        if (ret)
                goto unpin_map;
 
+       ret = i915_gem_context_pin_hw_id(ctx);
+       if (ret)
+               goto unpin_ring;
+
        intel_lr_context_descriptor_update(ctx, engine, ce);
 
        ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
@@ -1348,6 +1348,8 @@ __execlists_context_pin(struct intel_engine_cs *engine,
        i915_gem_context_get(ctx);
        return ce;
 
+unpin_ring:
+       intel_ring_unpin(ce->ring);
 unpin_map:
        i915_gem_object_unpin_map(ce->state->obj);
 unpin_vma:
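The hw_id is the last resource pinned, so both the unwind labels above and execlists_context_unpin() release it first. The pairing, in brief:

/*
 * pin:   vma -> map -> ring -> hw_id
 * unpin: hw_id -> ring -> map -> vma
 */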
@@ -1643,7 +1645,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
                goto err;
        }
 
-       err = i915_vma_pin(vma, 0, PAGE_SIZE, PIN_GLOBAL | PIN_HIGH);
+       err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
        if (err)
                goto err;
 
@@ -1657,7 +1659,7 @@ err:
 
 static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine)
 {
-       i915_vma_unpin_and_release(&engine->wa_ctx.vma);
+       i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
 }
 
 typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch);
@@ -1775,11 +1777,7 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
 
 static int gen8_init_common_ring(struct intel_engine_cs *engine)
 {
-       int ret;
-
-       ret = intel_mocs_init_engine(engine);
-       if (ret)
-               return ret;
+       intel_mocs_init_engine(engine);
 
        intel_engine_reset_breadcrumbs(engine);
 
@@ -1838,7 +1836,8 @@ execlists_reset_prepare(struct intel_engine_cs *engine)
        struct i915_request *request, *active;
        unsigned long flags;
 
-       GEM_TRACE("%s\n", engine->name);
+       GEM_TRACE("%s: depth<-%d\n", engine->name,
+                 atomic_read(&execlists->tasklet.count));
 
        /*
         * Prevent request submission to the hardware until we have
@@ -1971,22 +1970,18 @@ static void execlists_reset_finish(struct intel_engine_cs *engine)
 {
        struct intel_engine_execlists * const execlists = &engine->execlists;
 
-       /* After a GPU reset, we may have requests to replay */
-       if (!RB_EMPTY_ROOT(&execlists->queue.rb_root))
-               tasklet_schedule(&execlists->tasklet);
-
        /*
-        * Flush the tasklet while we still have the forcewake to be sure
-        * that it is not allowed to sleep before we restart and reload a
-        * context.
+        * After a GPU reset, we may have requests to replay. Do so now while
+        * we still have the forcewake to be sure that the GPU is not allowed
+        * to sleep before we restart and reload a context.
         *
-        * As before (with execlists_reset_prepare) we rely on the caller
-        * serialising multiple attempts to reset so that we know that we
-        * are the only one manipulating tasklet state.
         */
-       __tasklet_enable_sync_once(&execlists->tasklet);
+       if (!RB_EMPTY_ROOT(&execlists->queue.rb_root))
+               execlists->tasklet.func(execlists->tasklet.data);
 
-       GEM_TRACE("%s\n", engine->name);
+       tasklet_enable(&execlists->tasklet);
+       GEM_TRACE("%s: depth->%d\n", engine->name,
+                 atomic_read(&execlists->tasklet.count));
 }
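tasklet_schedule() would be a no-op here in practice: the tasklet was disabled in execlists_reset_prepare(), so the softirq could only run after tasklet_enable(), outside the forcewake. Calling the callback directly avoids that. A sketch of the pattern under the tasklet API of this era (func/data pair):

tasklet_disable(&t);		/* reset_prepare: count > 0, softirq blocked */
/* ... reset the engine ... */
if (have_work)
	t.func(t.data);		/* run the body synchronously while disabled */
tasklet_enable(&t);		/* reset_finish: normal scheduling resumes */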
 
 static int intel_logical_ring_emit_pdps(struct i915_request *rq)
@@ -2066,8 +2061,7 @@ static int gen8_emit_bb_start(struct i915_request *rq,
 
        /* FIXME(BDW): Address space and security selectors. */
        *cs++ = MI_BATCH_BUFFER_START_GEN8 |
-               (flags & I915_DISPATCH_SECURE ? 0 : BIT(8)) |
-               (flags & I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
+               (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
        *cs++ = lower_32_bits(offset);
        *cs++ = upper_32_bits(offset);
 
@@ -2494,6 +2488,9 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine)
 static u32
 make_rpcs(struct drm_i915_private *dev_priv)
 {
+       bool subslice_pg = INTEL_INFO(dev_priv)->sseu.has_subslice_pg;
+       u8 slices = hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask);
+       u8 subslices = hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask[0]);
        u32 rpcs = 0;
 
        /*
@@ -2503,6 +2500,38 @@ make_rpcs(struct drm_i915_private *dev_priv)
        if (INTEL_GEN(dev_priv) < 9)
                return 0;
 
+       /*
+        * Since the SScount bitfield in GEN8_R_PWR_CLK_STATE is only three bits
+        * wide and Icelake has up to eight subslices, special programming is
+        * needed in order to correctly enable all subslices.
+        *
+        * According to the documentation, software must consider the
+        * configuration as 2x4x8 and hardware will translate this to 1x8x8.
+        *
+        * Furthermore, even though SScount is three bits wide, the maximum
+        * documented value for it is four. From this, some rules/restrictions
+        * follow:
+        *
+        * 1.
+        * If the enabled subslice count is greater than four, two whole
+        * slices must be enabled instead.
+        *
+        * 2.
+        * When more than one slice is enabled, hardware ignores the subslice
+        * count altogether.
+        *
+        * From these restrictions it follows that it is not possible to enable
+        * a subslice count between the SScount maximum of four and the maximum
+        * number available on a particular SKU: either all subslices are
+        * enabled, or a count between one and four on the first slice.
+        */
+       if (IS_GEN11(dev_priv) && slices == 1 && subslices >= 4) {
+               GEM_BUG_ON(subslices & 1);
+
+               subslice_pg = false;
+               slices *= 2;
+       }
+
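A worked pass through the fixup, assuming an ICL SKU with one slice of eight subslices:

u8 slices = 1, subslices = 8;
bool subslice_pg = true;

if (slices == 1 && subslices >= 4) {	/* 8 >= 4: does not fit in SScount */
	GEM_BUG_ON(subslices & 1);	/* 8 is even, passes */
	subslice_pg = false;		/* SS_CNT will not be programmed */
	slices *= 2;			/* request two slices instead */
}
/* RPCS is programmed with S_CNT = 2 and no SS_CNT; per the 2x4x8 -> 1x8x8
 * translation above, hardware enables all eight subslices. */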
        /*
         * Starting in Gen9, render power gating can leave
         * slice/subslice/EU in a partially enabled state. We
@@ -2510,24 +2539,50 @@ make_rpcs(struct drm_i915_private *dev_priv)
         * enablement.
        */
        if (INTEL_INFO(dev_priv)->sseu.has_slice_pg) {
-               rpcs |= GEN8_RPCS_S_CNT_ENABLE;
-               rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask) <<
-                       GEN8_RPCS_S_CNT_SHIFT;
-               rpcs |= GEN8_RPCS_ENABLE;
+               u32 mask, val = slices;
+
+               if (INTEL_GEN(dev_priv) >= 11) {
+                       mask = GEN11_RPCS_S_CNT_MASK;
+                       val <<= GEN11_RPCS_S_CNT_SHIFT;
+               } else {
+                       mask = GEN8_RPCS_S_CNT_MASK;
+                       val <<= GEN8_RPCS_S_CNT_SHIFT;
+               }
+
+               GEM_BUG_ON(val & ~mask);
+               val &= mask;
+
+               rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_S_CNT_ENABLE | val;
        }
 
-       if (INTEL_INFO(dev_priv)->sseu.has_subslice_pg) {
-               rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
-               rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask[0]) <<
-                       GEN8_RPCS_SS_CNT_SHIFT;
-               rpcs |= GEN8_RPCS_ENABLE;
+       if (subslice_pg) {
+               u32 val = subslices;
+
+               val <<= GEN8_RPCS_SS_CNT_SHIFT;
+
+               GEM_BUG_ON(val & ~GEN8_RPCS_SS_CNT_MASK);
+               val &= GEN8_RPCS_SS_CNT_MASK;
+
+               rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_SS_CNT_ENABLE | val;
        }
 
        if (INTEL_INFO(dev_priv)->sseu.has_eu_pg) {
-               rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
-                       GEN8_RPCS_EU_MIN_SHIFT;
-               rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
-                       GEN8_RPCS_EU_MAX_SHIFT;
+               u32 val;
+
+               val = INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
+                     GEN8_RPCS_EU_MIN_SHIFT;
+               GEM_BUG_ON(val & ~GEN8_RPCS_EU_MIN_MASK);
+               val &= GEN8_RPCS_EU_MIN_MASK;
+
+               rpcs |= val;
+
+               val = INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
+                     GEN8_RPCS_EU_MAX_SHIFT;
+               GEM_BUG_ON(val & ~GEN8_RPCS_EU_MAX_MASK);
+               val &= GEN8_RPCS_EU_MAX_MASK;
+
+               rpcs |= val;
+
                rpcs |= GEN8_RPCS_ENABLE;
        }
 
@@ -2584,11 +2639,13 @@ static void execlists_init_reg_state(u32 *regs,
                                 MI_LRI_FORCE_POSTED;
 
        CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(engine),
-               _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
-                                   CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT) |
-               _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
-                                  (HAS_RESOURCE_STREAMER(dev_priv) ?
-                                  CTX_CTRL_RS_CTX_ENABLE : 0)));
+               _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) |
+               _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH));
+       if (INTEL_GEN(dev_priv) < 11) {
+               regs[CTX_CONTEXT_CONTROL + 1] |=
+                       _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
+                                           CTX_CTRL_RS_CTX_ENABLE);
+       }
        CTX_REG(regs, CTX_RING_HEAD, RING_HEAD(base), 0);
        CTX_REG(regs, CTX_RING_TAIL, RING_TAIL(base), 0);
        CTX_REG(regs, CTX_RING_BUFFER_START, RING_START(base), 0);
@@ -2654,6 +2711,10 @@ static void execlists_init_reg_state(u32 *regs,
 
                i915_oa_init_reg_state(engine, ctx, regs);
        }
+
+       regs[CTX_END] = MI_BATCH_BUFFER_END;
+       if (INTEL_GEN(dev_priv) >= 10)
+               regs[CTX_END] |= BIT(0);
 }
 
 static int
index 4dfb78e3ec7e4dc6f8784ca26d48360c14bcfb51..f5a5502ecf70fa475f0e866acb393f6192892584 100644 (file)
@@ -27,8 +27,6 @@
 #include "intel_ringbuffer.h"
 #include "i915_gem_context.h"
 
-#define GEN8_LR_CONTEXT_ALIGN I915_GTT_MIN_ALIGNMENT
-
 /* Execlists regs */
 #define RING_ELSP(engine)                      _MMIO((engine)->mmio_base + 0x230)
 #define RING_EXECLIST_STATUS_LO(engine)                _MMIO((engine)->mmio_base + 0x234)
index 169a2239d6c7e7456892e9c79c39b3da94fc8f5e..5ef932d810a720a41f48cab185fce79b91ff8b8b 100644 (file)
@@ -37,7 +37,7 @@
 #define CTX_PDP0_LDW                   0x32
 #define CTX_LRI_HEADER_2               0x41
 #define CTX_R_PWR_CLK_STATE            0x42
-#define CTX_GPGPU_CSR_BASE_ADDRESS     0x44
+#define CTX_END                                0x44
 
 #define CTX_REG(reg_state, pos, reg, val) do { \
        u32 *reg_state__ = (reg_state); \
index 9f0bd6a4cb79c191e0be26d59c53d1068dadd67c..77e9871a8c9ac87ca4d2360262d7d815cd8270b0 100644 (file)
@@ -232,20 +232,17 @@ static i915_reg_t mocs_register(enum intel_engine_id engine_id, int index)
  *
  * This function simply emits a MI_LOAD_REGISTER_IMM command for the
  * given table starting at the given address.
- *
- * Return: 0 on success, otherwise the error status.
  */
-int intel_mocs_init_engine(struct intel_engine_cs *engine)
+void intel_mocs_init_engine(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
        struct drm_i915_mocs_table table;
        unsigned int index;
 
        if (!get_mocs_settings(dev_priv, &table))
-               return 0;
+               return;
 
-       if (WARN_ON(table.size > GEN9_NUM_MOCS_ENTRIES))
-               return -ENODEV;
+       GEM_BUG_ON(table.size > GEN9_NUM_MOCS_ENTRIES);
 
        for (index = 0; index < table.size; index++)
                I915_WRITE(mocs_register(engine->id, index),
@@ -262,8 +259,6 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine)
        for (; index < GEN9_NUM_MOCS_ENTRIES; index++)
                I915_WRITE(mocs_register(engine->id, index),
                           table.table[0].control_value);
-
-       return 0;
 }
 
 /**
index d1751f91c1a42ea82e363c08156c0e34ac6b702b..d89080d75b80ddfc21285e5ad2def13899430937 100644 (file)
@@ -54,6 +54,6 @@
 
 int intel_rcs_context_init_mocs(struct i915_request *rq);
 void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv);
-int intel_mocs_init_engine(struct intel_engine_cs *engine);
+void intel_mocs_init_engine(struct intel_engine_cs *engine);
 
 #endif
index 43ae9de12ba3eb821c12d63e9c7b1ec923eace48..d99e5fabe93c3a822361aa5345faf70b137fe819 100644 (file)
@@ -26,6 +26,7 @@
  */
 
 #include <linux/cpufreq.h>
+#include <linux/pm_runtime.h>
 #include <drm/drm_plane_helper.h>
 #include "i915_drv.h"
 #include "intel_drv.h"
@@ -2942,8 +2943,8 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
                unsigned int latency = wm[level];
 
                if (latency == 0) {
-                       DRM_ERROR("%s WM%d latency not provided\n",
-                                 name, level);
+                       DRM_DEBUG_KMS("%s WM%d latency not provided\n",
+                                     name, level);
                        continue;
                }
 
@@ -3771,11 +3772,11 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
        return true;
 }
 
-static unsigned int intel_get_ddb_size(struct drm_i915_private *dev_priv,
-                                      const struct intel_crtc_state *cstate,
-                                      const unsigned int total_data_rate,
-                                      const int num_active,
-                                      struct skl_ddb_allocation *ddb)
+static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
+                             const struct intel_crtc_state *cstate,
+                             const unsigned int total_data_rate,
+                             const int num_active,
+                             struct skl_ddb_allocation *ddb)
 {
        const struct drm_display_mode *adjusted_mode;
        u64 total_data_bw;
@@ -3814,8 +3815,12 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *for_crtc = cstate->base.crtc;
-       unsigned int pipe_size, ddb_size;
-       int nth_active_pipe;
+       const struct drm_crtc_state *crtc_state;
+       const struct drm_crtc *crtc;
+       u32 pipe_width = 0, total_width = 0, width_before_pipe = 0;
+       enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
+       u16 ddb_size;
+       u32 i;
 
        if (WARN_ON(!state) || !cstate->base.active) {
                alloc->start = 0;
@@ -3833,14 +3838,14 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
                                      *num_active, ddb);
 
        /*
-        * If the state doesn't change the active CRTC's, then there's
-        * no need to recalculate; the existing pipe allocation limits
-        * should remain unchanged.  Note that we're safe from racing
-        * commits since any racing commit that changes the active CRTC
-        * list would need to grab _all_ crtc locks, including the one
-        * we currently hold.
+        * If the state doesn't change the active CRTCs and doesn't request
+        * a modeset, then there's no need to recalculate; the existing pipe
+        * allocation limits should remain unchanged. Note that we're safe
+        * from racing commits, since any racing commit that changes the
+        * active CRTC list or does a modeset would need to grab _all_ crtc
+        * locks, including the one we currently hold.
         */
-       if (!intel_state->active_pipe_changes) {
+       if (!intel_state->active_pipe_changes && !intel_state->modeset) {
                /*
                 * alloc may be cleared by clear_intel_crtc_state,
                 * copy from old state to be sure
@@ -3849,11 +3854,32 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
                return;
        }
 
-       nth_active_pipe = hweight32(intel_state->active_crtcs &
-                                   (drm_crtc_mask(for_crtc) - 1));
-       pipe_size = ddb_size / hweight32(intel_state->active_crtcs);
-       alloc->start = nth_active_pipe * ddb_size / *num_active;
-       alloc->end = alloc->start + pipe_size;
+       /*
+        * Watermark/ddb requirements depend heavily on the width of the
+        * framebuffer, so instead of allocating the DDB equally among pipes,
+        * distribute it based on the resolution/width of each display.
+        */
+       for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+               const struct drm_display_mode *adjusted_mode;
+               int hdisplay, vdisplay;
+               enum pipe pipe;
+
+               if (!crtc_state->enable)
+                       continue;
+
+               pipe = to_intel_crtc(crtc)->pipe;
+               adjusted_mode = &crtc_state->adjusted_mode;
+               drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay);
+               total_width += hdisplay;
+
+               if (pipe < for_pipe)
+                       width_before_pipe += hdisplay;
+               else if (pipe == for_pipe)
+                       pipe_width = hdisplay;
+       }
+
+       alloc->start = ddb_size * width_before_pipe / total_width;
+       alloc->end = ddb_size * (width_before_pipe + pipe_width) / total_width;
 }
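A worked allocation, assuming two active pipes with 1920- and 3840-pixel-wide modes and a hypothetical 896-block DDB:

/* total_width = 1920 + 3840 = 5760
 * pipe A (first):  start = 896 *    0 / 5760 = 0
 *                  end   = 896 * 1920 / 5760 = 298
 * pipe B (second): start = 896 * 1920 / 5760 = 298
 *                  end   = 896 * 5760 / 5760 = 896
 * The wider pipe receives roughly two thirds of the DDB instead of half. */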
 
 static unsigned int skl_cursor_allocation(int num_active)
@@ -3909,7 +3935,12 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
                                      val & PLANE_CTL_ALPHA_MASK);
 
        val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
-       val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
+       /*
+        * FIXME: add proper NV12 support for ICL. Avoid reading unclaimed
+        * registers for now.
+        */
+       if (INTEL_GEN(dev_priv) < 11)
+               val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
 
        if (fourcc == DRM_FORMAT_NV12) {
                skl_ddb_entry_init_from_hw(dev_priv,
@@ -4977,6 +5008,7 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
 
        skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
                            &ddb->plane[pipe][plane_id]);
+       /* FIXME: add proper NV12 support for ICL. */
        if (INTEL_GEN(dev_priv) >= 11)
                return skl_ddb_entry_write(dev_priv,
                                           PLANE_BUF_CFG(pipe, plane_id),
@@ -5141,17 +5173,6 @@ skl_compute_ddb(struct drm_atomic_state *state)
        return 0;
 }
 
-static void
-skl_copy_ddb_for_pipe(struct skl_ddb_values *dst,
-                     struct skl_ddb_values *src,
-                     enum pipe pipe)
-{
-       memcpy(dst->ddb.uv_plane[pipe], src->ddb.uv_plane[pipe],
-              sizeof(dst->ddb.uv_plane[pipe]));
-       memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
-              sizeof(dst->ddb.plane[pipe]));
-}
-
 static void
 skl_print_wm_changes(const struct drm_atomic_state *state)
 {
@@ -5259,7 +5280,7 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
         * any other display updates race with this transaction, so we need
         * to grab the lock on *all* CRTC's.
         */
-       if (intel_state->active_pipe_changes) {
+       if (intel_state->active_pipe_changes || intel_state->modeset) {
                realloc_pipes = ~0;
                intel_state->wm_results.dirty_pipes = ~0;
        }
@@ -5381,7 +5402,10 @@ static void skl_initial_wm(struct intel_atomic_state *state,
        if (cstate->base.active_changed)
                skl_atomic_update_crtc_wm(state, cstate);
 
-       skl_copy_ddb_for_pipe(hw_vals, results, pipe);
+       memcpy(hw_vals->ddb.uv_plane[pipe], results->ddb.uv_plane[pipe],
+              sizeof(hw_vals->ddb.uv_plane[pipe]));
+       memcpy(hw_vals->ddb.plane[pipe], results->ddb.plane[pipe],
+              sizeof(hw_vals->ddb.plane[pipe]));
 
        mutex_unlock(&dev_priv->wm.wm_mutex);
 }
@@ -6379,7 +6403,6 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
                new_power = HIGH_POWER;
        rps_set_power(dev_priv, new_power);
        mutex_unlock(&rps->power.mutex);
-       rps->last_adj = 0;
 }
 
 void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive)
@@ -8159,7 +8182,7 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
         */
        if (!sanitize_rc6(dev_priv)) {
                DRM_INFO("RC6 disabled, disabling runtime PM support\n");
-               intel_runtime_pm_get(dev_priv);
+               pm_runtime_get(&dev_priv->drm.pdev->dev);
        }
 
        mutex_lock(&dev_priv->pcu_lock);
@@ -8211,7 +8234,7 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
                valleyview_cleanup_gt_powersave(dev_priv);
 
        if (!HAS_RC6(dev_priv))
-               intel_runtime_pm_put(dev_priv);
+               pm_runtime_put(&dev_priv->drm.pdev->dev);
 }
 
 /**
@@ -8238,7 +8261,7 @@ void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
 
        if (INTEL_GEN(dev_priv) >= 11)
                gen11_reset_rps_interrupts(dev_priv);
-       else
+       else if (INTEL_GEN(dev_priv) >= 6)
                gen6_reset_rps_interrupts(dev_priv);
 }
 
index 4bd5768731ee26b282b5ad92f8d446af1f21e7a0..b6838b525502ea68f8d472dbd703d51bfa1d6561 100644 (file)
 #include "intel_drv.h"
 #include "i915_drv.h"
 
-void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug)
+static bool psr_global_enabled(u32 debug)
+{
+       switch (debug & I915_PSR_DEBUG_MODE_MASK) {
+       case I915_PSR_DEBUG_DEFAULT:
+               return i915_modparams.enable_psr;
+       case I915_PSR_DEBUG_DISABLE:
+               return false;
+       default:
+               return true;
+       }
+}
+
+static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
+                              const struct intel_crtc_state *crtc_state)
+{
+       switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
+       case I915_PSR_DEBUG_FORCE_PSR1:
+               return false;
+       default:
+               return crtc_state->has_psr2;
+       }
+}
+
+void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug)
 {
        u32 debug_mask, mask;
 
@@ -77,10 +100,9 @@ void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug)
                              EDP_PSR_PRE_ENTRY(TRANSCODER_C);
        }
 
-       if (debug)
+       if (debug & I915_PSR_DEBUG_IRQ)
                mask |= debug_mask;
 
-       WRITE_ONCE(dev_priv->psr.debug, debug);
        I915_WRITE(EDP_PSR_IMR, ~mask);
 }
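The debug word is now a small bitfield rather than a bool. Assumed layout, with names taken from the callers in this diff and numeric values only illustrative:

/* bits 3:0 (I915_PSR_DEBUG_MODE_MASK):
 *   0 = default (honour the enable_psr modparam)
 *   1 = force disable
 *   2 = force enable
 *   3 = force PSR1 even where PSR2 would be used
 * bit 4 (I915_PSR_DEBUG_IRQ): unmask PSR entry/exit interrupts
 */
u32 val = I915_PSR_DEBUG_FORCE_PSR1 | I915_PSR_DEBUG_IRQ;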
 
@@ -213,6 +235,9 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
        dev_priv->psr.sink_sync_latency =
                intel_dp_get_sink_sync_latency(intel_dp);
 
+       WARN_ON(dev_priv->psr.dp);
+       dev_priv->psr.dp = intel_dp;
+
        if (INTEL_GEN(dev_priv) >= 9 &&
            (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
                bool y_req = intel_dp->psr_dpcd[1] &
@@ -245,7 +270,7 @@ static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
                                const struct intel_crtc_state *crtc_state)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct edp_vsc_psr psr_vsc;
 
        if (dev_priv->psr.psr2_enabled) {
@@ -275,8 +300,7 @@ static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
 
 static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
 {
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        u32 aux_clock_divider, aux_ctl;
        int i;
        static const uint8_t aux_msg[] = {
@@ -309,9 +333,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
 
 static void intel_psr_enable_sink(struct intel_dp *intel_dp)
 {
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        u8 dpcd_val = DP_PSR_ENABLE;
 
        /* Enable ALPM at sink for psr2 */
@@ -332,9 +354,7 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
 
 static void hsw_activate_psr1(struct intel_dp *intel_dp)
 {
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        u32 max_sleep_time = 0x1f;
        u32 val = EDP_PSR_ENABLE;
 
@@ -389,9 +409,7 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
 
 static void hsw_activate_psr2(struct intel_dp *intel_dp)
 {
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        u32 val;
 
        /* Let's use 6 as the minimum to cover all known cases including the
@@ -427,8 +445,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
                                    struct intel_crtc_state *crtc_state)
 {
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay;
        int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
        int psr_max_h = 0, psr_max_v = 0;
@@ -463,7 +480,7 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
                              struct intel_crtc_state *crtc_state)
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->base.adjusted_mode;
        int psr_setup_time;
@@ -471,10 +488,8 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
        if (!CAN_PSR(dev_priv))
                return;
 
-       if (!i915_modparams.enable_psr) {
-               DRM_DEBUG_KMS("PSR disable by flag\n");
+       if (intel_dp != dev_priv->psr.dp)
                return;
-       }
 
        /*
         * HSW spec explicitly says PSR is tied to port A.
@@ -517,14 +532,11 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
 
        crtc_state->has_psr = true;
        crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
-       DRM_DEBUG_KMS("Enabling PSR%s\n", crtc_state->has_psr2 ? "2" : "");
 }
 
 static void intel_psr_activate(struct intel_dp *intel_dp)
 {
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
        if (INTEL_GEN(dev_priv) >= 9)
                WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
@@ -544,9 +556,7 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
 static void intel_psr_enable_source(struct intel_dp *intel_dp,
                                    const struct intel_crtc_state *crtc_state)
 {
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 
        /* Only HSW and BDW have PSR AUX registers that need to be setup. SKL+
@@ -589,6 +599,24 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
        }
 }
 
+static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
+                                   const struct intel_crtc_state *crtc_state)
+{
+       struct intel_dp *intel_dp = dev_priv->psr.dp;
+
+       if (dev_priv->psr.enabled)
+               return;
+
+       DRM_DEBUG_KMS("Enabling PSR%s\n",
+                     dev_priv->psr.psr2_enabled ? "2" : "1");
+       intel_psr_setup_vsc(intel_dp, crtc_state);
+       intel_psr_enable_sink(intel_dp);
+       intel_psr_enable_source(intel_dp, crtc_state);
+       dev_priv->psr.enabled = true;
+
+       intel_psr_activate(intel_dp);
+}
+
 /**
  * intel_psr_enable - Enable PSR
  * @intel_dp: Intel DP
@@ -599,9 +627,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
 void intel_psr_enable(struct intel_dp *intel_dp,
                      const struct intel_crtc_state *crtc_state)
 {
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
        if (!crtc_state->has_psr)
                return;
@@ -610,21 +636,21 @@ void intel_psr_enable(struct intel_dp *intel_dp,
                return;
 
        WARN_ON(dev_priv->drrs.dp);
+
        mutex_lock(&dev_priv->psr.lock);
-       if (dev_priv->psr.enabled) {
+       if (dev_priv->psr.prepared) {
                DRM_DEBUG_KMS("PSR already in use\n");
                goto unlock;
        }
 
-       dev_priv->psr.psr2_enabled = crtc_state->has_psr2;
+       dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
        dev_priv->psr.busy_frontbuffer_bits = 0;
+       dev_priv->psr.prepared = true;
 
-       intel_psr_setup_vsc(intel_dp, crtc_state);
-       intel_psr_enable_sink(intel_dp);
-       intel_psr_enable_source(intel_dp, crtc_state);
-       dev_priv->psr.enabled = intel_dp;
-
-       intel_psr_activate(intel_dp);
+       if (psr_global_enabled(dev_priv->psr.debug))
+               intel_psr_enable_locked(dev_priv, crtc_state);
+       else
+               DRM_DEBUG_KMS("PSR disabled by flag\n");
 
 unlock:
        mutex_unlock(&dev_priv->psr.lock);
@@ -633,9 +659,7 @@ unlock:
 static void
 intel_psr_disable_source(struct intel_dp *intel_dp)
 {
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
        if (dev_priv->psr.active) {
                i915_reg_t psr_status;
@@ -674,21 +698,21 @@ intel_psr_disable_source(struct intel_dp *intel_dp)
 
 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
 {
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
        lockdep_assert_held(&dev_priv->psr.lock);
 
        if (!dev_priv->psr.enabled)
                return;
 
+       DRM_DEBUG_KMS("Disabling PSR%s\n",
+                     dev_priv->psr.psr2_enabled ? "2" : "1");
        intel_psr_disable_source(intel_dp);
 
        /* Disable PSR on Sink */
        drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
 
-       dev_priv->psr.enabled = NULL;
+       dev_priv->psr.enabled = false;
 }
 
 /**
@@ -701,9 +725,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
 void intel_psr_disable(struct intel_dp *intel_dp,
                       const struct intel_crtc_state *old_crtc_state)
 {
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
        if (!old_crtc_state->has_psr)
                return;
@@ -712,57 +734,61 @@ void intel_psr_disable(struct intel_dp *intel_dp,
                return;
 
        mutex_lock(&dev_priv->psr.lock);
+       if (!dev_priv->psr.prepared) {
+               mutex_unlock(&dev_priv->psr.lock);
+               return;
+       }
+
        intel_psr_disable_locked(intel_dp);
+
+       dev_priv->psr.prepared = false;
        mutex_unlock(&dev_priv->psr.lock);
        cancel_work_sync(&dev_priv->psr.work);
 }
 
-int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state)
+/**
+ * intel_psr_wait_for_idle - wait for PSR1 to idle
+ * @new_crtc_state: new CRTC state
+ * @out_value: PSR status in case of failure
+ *
+ * This function is expected to be called from pipe_update_start(), where it
+ * will not race with PSR enable or disable.
+ *
+ * Returns: 0 on success or -ETIMEDOUT if the PSR status does not idle.
+ */
+int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
+                           u32 *out_value)
 {
        struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       i915_reg_t reg;
-       u32 mask;
 
-       if (!new_crtc_state->has_psr)
+       if (!dev_priv->psr.enabled || !new_crtc_state->has_psr)
                return 0;
 
-       /*
-        * The sole user right now is intel_pipe_update_start(),
-        * which won't race with psr_enable/disable, which is
-        * where psr2_enabled is written to. So, we don't need
-        * to acquire the psr.lock. More importantly, we want the
-        * latency inside intel_pipe_update_start() to be as low
-        * as possible, so no need to acquire psr.lock when it is
-        * not needed and will induce latencies in the atomic
-        * update path.
-        */
-       if (dev_priv->psr.psr2_enabled) {
-               reg = EDP_PSR2_STATUS;
-               mask = EDP_PSR2_STATUS_STATE_MASK;
-       } else {
-               reg = EDP_PSR_STATUS;
-               mask = EDP_PSR_STATUS_STATE_MASK;
-       }
+       /* FIXME: Update this for PSR2 if we need to wait for idle */
+       if (READ_ONCE(dev_priv->psr.psr2_enabled))
+               return 0;
 
        /*
-        * Max time for PSR to idle = Inverse of the refresh rate +
-        * 6 ms of exit training time + 1.5 ms of aux channel
-        * handshake. 50 msec is defesive enough to cover everything.
+        * From bspec: Panel Self Refresh (BDW+)
+        * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
+        * exit training time + 1.5 ms of aux channel handshake. 50 ms is
+        * defensive enough to cover everything.
         */
-       return intel_wait_for_register(dev_priv, reg, mask,
-                                      EDP_PSR_STATUS_STATE_IDLE, 50);
+
+       return __intel_wait_for_register(dev_priv, EDP_PSR_STATUS,
+                                        EDP_PSR_STATUS_STATE_MASK,
+                                        EDP_PSR_STATUS_STATE_IDLE, 2, 50,
+                                        out_value);
 }
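Sanity-checking the 50 ms bound at an assumed 60 Hz panel:

/* 1/60 s = 16.7 ms (refresh) + 6 ms (exit training) + 1.5 ms (AUX)
 *        = 24.2 ms worst case, so 50 ms leaves roughly 2x margin.
 * The poll itself runs at the 2 us fast / 50 ms slow cadence passed to
 * __intel_wait_for_register() above. */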
 
 static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
 {
-       struct intel_dp *intel_dp;
        i915_reg_t reg;
        u32 mask;
        int err;
 
-       intel_dp = dev_priv->psr.enabled;
-       if (!intel_dp)
+       if (!dev_priv->psr.enabled)
                return false;
 
        if (dev_priv->psr.psr2_enabled) {
@@ -784,6 +810,89 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
        return err == 0 && dev_priv->psr.enabled;
 }
 
+static bool switching_psr(struct drm_i915_private *dev_priv,
+                         struct intel_crtc_state *crtc_state,
+                         u32 mode)
+{
+       /* Can't switch psr state anyway if PSR2 is not supported. */
+       if (!crtc_state || !crtc_state->has_psr2)
+               return false;
+
+       if (dev_priv->psr.psr2_enabled && mode == I915_PSR_DEBUG_FORCE_PSR1)
+               return true;
+
+       if (!dev_priv->psr.psr2_enabled && mode != I915_PSR_DEBUG_FORCE_PSR1)
+               return true;
+
+       return false;
+}
+
+int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv,
+                              struct drm_modeset_acquire_ctx *ctx,
+                              u64 val)
+{
+       struct drm_device *dev = &dev_priv->drm;
+       struct drm_connector_state *conn_state;
+       struct intel_crtc_state *crtc_state = NULL;
+       struct drm_crtc_commit *commit;
+       struct drm_crtc *crtc;
+       struct intel_dp *dp;
+       int ret;
+       bool enable;
+       u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
+
+       if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
+           mode > I915_PSR_DEBUG_FORCE_PSR1) {
+               DRM_DEBUG_KMS("Invalid debug mask %llx\n", val);
+               return -EINVAL;
+       }
+
+       ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
+       if (ret)
+               return ret;
+
+       /* dev_priv->psr.dp should be set once and then never touched again. */
+       dp = READ_ONCE(dev_priv->psr.dp);
+       conn_state = dp->attached_connector->base.state;
+       crtc = conn_state->crtc;
+       if (crtc) {
+               ret = drm_modeset_lock(&crtc->mutex, ctx);
+               if (ret)
+                       return ret;
+
+               crtc_state = to_intel_crtc_state(crtc->state);
+               commit = crtc_state->base.commit;
+       } else {
+               commit = conn_state->commit;
+       }
+       if (commit) {
+               ret = wait_for_completion_interruptible(&commit->hw_done);
+               if (ret)
+                       return ret;
+       }
+
+       ret = mutex_lock_interruptible(&dev_priv->psr.lock);
+       if (ret)
+               return ret;
+
+       enable = psr_global_enabled(val);
+
+       if (!enable || switching_psr(dev_priv, crtc_state, mode))
+               intel_psr_disable_locked(dev_priv->psr.dp);
+
+       dev_priv->psr.debug = val;
+       if (crtc)
+               dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
+
+       intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
+
+       if (dev_priv->psr.prepared && enable)
+               intel_psr_enable_locked(dev_priv, crtc_state);
+
+       mutex_unlock(&dev_priv->psr.lock);
+       return ret;
+}
+
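A hypothetical debugfs write handler driving this, retrying on -EDEADLK as the drm_modeset_acquire_ctx contract requires; the i915_edp_psr_debug name and the fops wiring are assumptions, not part of this hunk:

static int i915_edp_psr_debug_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

retry:
	ret = intel_psr_set_debugfs_mode(dev_priv, &ctx, val);
	if (ret == -EDEADLK) {
		ret = drm_modeset_backoff(&ctx);
		if (!ret)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}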
 static void intel_psr_work(struct work_struct *work)
 {
        struct drm_i915_private *dev_priv =
@@ -811,7 +920,7 @@ static void intel_psr_work(struct work_struct *work)
        if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
                goto unlock;
 
-       intel_psr_activate(dev_priv->psr.enabled);
+       intel_psr_activate(dev_priv->psr.dp);
 unlock:
        mutex_unlock(&dev_priv->psr.lock);
 }
@@ -866,7 +975,7 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
                return;
        }
 
-       crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+       crtc = dp_to_dig_port(dev_priv->psr.dp)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;
 
        frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
@@ -909,7 +1018,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
                return;
        }
 
-       crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+       crtc = dp_to_dig_port(dev_priv->psr.dp)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;
 
        frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
@@ -977,9 +1086,7 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
 
 void intel_psr_short_pulse(struct intel_dp *intel_dp)
 {
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct i915_psr *psr = &dev_priv->psr;
        u8 val;
        const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
@@ -991,7 +1098,7 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
 
        mutex_lock(&psr->lock);
 
-       if (psr->enabled != intel_dp)
+       if (!psr->enabled || psr->dp != intel_dp)
                goto exit;
 
        if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) != 1) {
index 6a8f27d0a7429e6d3a01f46e9b768031680519b6..472939f5c18fc8b648e40914a28a49325cb9b669 100644 (file)
@@ -344,11 +344,14 @@ gen7_render_ring_flush(struct i915_request *rq, u32 mode)
 static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
+       struct page *page = virt_to_page(engine->status_page.page_addr);
+       phys_addr_t phys = PFN_PHYS(page_to_pfn(page));
        u32 addr;
 
-       addr = dev_priv->status_page_dmah->busaddr;
+       addr = lower_32_bits(phys);
        if (INTEL_GEN(dev_priv) >= 4)
-               addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
+               addr |= (phys >> 28) & 0xf0;
+
        I915_WRITE(HWS_PGA, addr);
 }
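A worked packing, assuming a status page at a 36-bit physical address:

/* phys                = 0x2_0000_1000
 * lower_32_bits(phys) = 0x0000_1000
 * (phys >> 28) & 0xf0 = 0x20		(bits 35:32 land in bits 7:4)
 * HWS_PGA value       = 0x0000_1020 */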
 
@@ -537,6 +540,8 @@ static int init_ring_common(struct intel_engine_cs *engine)
        if (INTEL_GEN(dev_priv) > 2)
                I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
 
+       /* Papering over lost _interrupts_ immediately following the restart */
+       intel_engine_wakeup(engine);
 out:
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
@@ -1013,24 +1018,22 @@ i915_emit_bb_start(struct i915_request *rq,
        return 0;
 }
 
-
-
-int intel_ring_pin(struct intel_ring *ring,
-                  struct drm_i915_private *i915,
-                  unsigned int offset_bias)
+int intel_ring_pin(struct intel_ring *ring)
 {
-       enum i915_map_type map = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
        struct i915_vma *vma = ring->vma;
+       enum i915_map_type map =
+               HAS_LLC(vma->vm->i915) ? I915_MAP_WB : I915_MAP_WC;
        unsigned int flags;
        void *addr;
        int ret;
 
        GEM_BUG_ON(ring->vaddr);
 
-
        flags = PIN_GLOBAL;
-       if (offset_bias)
-               flags |= PIN_OFFSET_BIAS | offset_bias;
+
+       /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
+       flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
+
        if (vma->obj->stolen)
                flags |= PIN_MAPPABLE;
        else
@@ -1045,7 +1048,7 @@ int intel_ring_pin(struct intel_ring *ring,
                        return ret;
        }
 
-       ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags);
+       ret = i915_vma_pin(vma, 0, 0, flags);
        if (unlikely(ret))
                return ret;
 
@@ -1230,8 +1233,7 @@ static int __context_pin(struct intel_context *ce)
                        return err;
        }
 
-       err = i915_vma_pin(vma, 0, I915_GTT_MIN_ALIGNMENT,
-                          PIN_GLOBAL | PIN_HIGH);
+       err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
        if (err)
                return err;
 
@@ -1419,8 +1421,7 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
                goto err;
        }
 
-       /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
-       err = intel_ring_pin(ring, engine->i915, I915_GTT_PAGE_SIZE);
+       err = intel_ring_pin(ring);
        if (err)
                goto err_ring;
 
@@ -1706,9 +1707,29 @@ static int switch_context(struct i915_request *rq)
        }
 
        if (ppgtt) {
+               ret = engine->emit_flush(rq, EMIT_INVALIDATE);
+               if (ret)
+                       goto err_mm;
+
                ret = flush_pd_dir(rq);
                if (ret)
                        goto err_mm;
+
+               /*
+                * Not only do we need a full barrier (post-sync write) after
+                * invalidating the TLBs, but we need to wait a little bit
+                * longer. Whether this is merely delaying us, or the
+                * subsequent flush is a key part of serialising with the
+                * post-sync op, this extra pass appears vital before a
+                * mm switch!
+                */
+               ret = engine->emit_flush(rq, EMIT_INVALIDATE);
+               if (ret)
+                       goto err_mm;
+
+               ret = engine->emit_flush(rq, EMIT_FLUSH);
+               if (ret)
+                       goto err_mm;
        }
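The resulting command sequence for each mm switch, summarised:

/* EMIT_INVALIDATE  - invalidate the TLBs
 * flush_pd_dir()   - reload the page-directory registers
 * EMIT_INVALIDATE  - second invalidate, serialising with the post-sync
 *                    write (empirically required, per the comment above)
 * EMIT_FLUSH       - flush before the new mm is used */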
 
        if (ctx->remap_slice) {
@@ -1946,7 +1967,7 @@ static void gen6_bsd_submit_request(struct i915_request *request)
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
-static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode)
+static int mi_flush_dw(struct i915_request *rq, u32 flags)
 {
        u32 cmd, *cs;
 
@@ -1956,7 +1977,8 @@ static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode)
 
        cmd = MI_FLUSH_DW;
 
-       /* We always require a command barrier so that subsequent
+       /*
+        * We always require a command barrier so that subsequent
         * commands, such as breadcrumb interrupts, are strictly ordered
         * wrt the contents of the write cache being flushed to memory
         * (and thus being coherent from the CPU).
@@ -1964,22 +1986,33 @@ static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode)
        cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
 
        /*
-        * Bspec vol 1c.5 - video engine command streamer:
+        * Bspec vol 1c.3 - blitter engine command streamer:
         * "If ENABLED, all TLBs will be invalidated once the flush
         * operation is complete. This bit is only valid when the
         * Post-Sync Operation field is a value of 1h or 3h."
         */
-       if (mode & EMIT_INVALIDATE)
-               cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
+       cmd |= flags;
 
        *cs++ = cmd;
        *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
        *cs++ = 0;
        *cs++ = MI_NOOP;
+
        intel_ring_advance(rq, cs);
+
        return 0;
 }
 
+static int gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags)
+{
+       return mi_flush_dw(rq, mode & EMIT_INVALIDATE ? invflags : 0);
+}
+
+static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode)
+{
+       return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB | MI_INVALIDATE_BSD);
+}
+
 static int
 hsw_emit_bb_start(struct i915_request *rq,
                  u64 offset, u32 len,
@@ -1992,9 +2025,7 @@ hsw_emit_bb_start(struct i915_request *rq,
                return PTR_ERR(cs);
 
        *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
-               0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
-               (dispatch_flags & I915_DISPATCH_RS ?
-               MI_BATCH_RESOURCE_STREAMER : 0);
+               0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW);
        /* bit0-7 is the length on GEN6+ */
        *cs++ = offset;
        intel_ring_advance(rq, cs);
@@ -2026,36 +2057,7 @@ gen6_emit_bb_start(struct i915_request *rq,
 
 static int gen6_ring_flush(struct i915_request *rq, u32 mode)
 {
-       u32 cmd, *cs;
-
-       cs = intel_ring_begin(rq, 4);
-       if (IS_ERR(cs))
-               return PTR_ERR(cs);
-
-       cmd = MI_FLUSH_DW;
-
-       /* We always require a command barrier so that subsequent
-        * commands, such as breadcrumb interrupts, are strictly ordered
-        * wrt the contents of the write cache being flushed to memory
-        * (and thus being coherent from the CPU).
-        */
-       cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
-
-       /*
-        * Bspec vol 1c.3 - blitter engine command streamer:
-        * "If ENABLED, all TLBs will be invalidated once the flush
-        * operation is complete. This bit is only valid when the
-        * Post-Sync Operation field is a value of 1h or 3h."
-        */
-       if (mode & EMIT_INVALIDATE)
-               cmd |= MI_INVALIDATE_TLB;
-       *cs++ = cmd;
-       *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
-       *cs++ = 0;
-       *cs++ = MI_NOOP;
-       intel_ring_advance(rq, cs);
-
-       return 0;
+       return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
 }
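After the consolidation, the two engine flushes differ only in the invalidate bits they pass down:

/* video (BSD): gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB | MI_INVALIDATE_BSD)
 * blitter:     gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB) */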
 
 static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
index f5ffa6d31e82c3d19a4ceb8e1b0e78956d0eae3d..3f6920dd78806abc8e6fce953186391e04cb5629 100644 (file)
@@ -474,7 +474,6 @@ struct intel_engine_cs {
                                         unsigned int dispatch_flags);
 #define I915_DISPATCH_SECURE BIT(0)
 #define I915_DISPATCH_PINNED BIT(1)
-#define I915_DISPATCH_RS     BIT(2)
        void            (*emit_breadcrumb)(struct i915_request *rq, u32 *cs);
        int             emit_breadcrumb_sz;
 
@@ -797,9 +796,7 @@ struct intel_ring *
 intel_engine_create_ring(struct intel_engine_cs *engine,
                         struct i915_timeline *timeline,
                         int size);
-int intel_ring_pin(struct intel_ring *ring,
-                  struct drm_i915_private *i915,
-                  unsigned int offset_bias);
+int intel_ring_pin(struct intel_ring *ring);
 void intel_ring_reset(struct intel_ring *ring, u32 tail);
 unsigned int intel_ring_update_space(struct intel_ring *ring);
 void intel_ring_unpin(struct intel_ring *ring);
@@ -909,18 +906,15 @@ int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
 int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
 
 int intel_engine_stop_cs(struct intel_engine_cs *engine);
+void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine);
 
 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
 u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
 
-static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
-{
-       return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
-}
-
 static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
 {
-       /* We are only peeking at the tail of the submit queue (and not the
+       /*
+        * We are only peeking at the tail of the submit queue (and not the
         * queue itself) in order to gain a hint as to the current active
         * state of the engine. Callers are not expected to be taking
         * engine->timeline->lock, nor are they expected to be concerned
@@ -930,6 +924,31 @@ static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
        return READ_ONCE(engine->timeline.seqno);
 }
 
+static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
+{
+       return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
+}
+
+static inline bool intel_engine_signaled(struct intel_engine_cs *engine,
+                                        u32 seqno)
+{
+       return i915_seqno_passed(intel_engine_get_seqno(engine), seqno);
+}
+
+static inline bool intel_engine_has_completed(struct intel_engine_cs *engine,
+                                             u32 seqno)
+{
+       GEM_BUG_ON(!seqno);
+       return intel_engine_signaled(engine, seqno);
+}
+
+static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
+                                           u32 seqno)
+{
+       GEM_BUG_ON(!seqno);
+       return intel_engine_signaled(engine, seqno - 1);
+}
+
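The seqno - 1 trick in intel_engine_has_started() works because the breadcrumb of the preceding request must signal before this request can begin executing. A hypothetical caller, with field names assumed for this era of the driver:

static bool request_started(const struct i915_request *rq)
{
	u32 seqno = READ_ONCE(rq->global_seqno);

	if (!seqno)	/* not yet submitted to the hardware */
		return false;

	return intel_engine_has_started(rq->engine, seqno);
}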
 void intel_engine_get_instdone(struct intel_engine_cs *engine,
                               struct intel_instdone *instdone);
 
index 6b5aa3b074ecc8ffb11ae61ead04bc2ea0ee7bb2..480dadb1047bfe9cd3729fa56f740ea9b8fe3c70 100644 (file)
 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
                                         enum i915_power_well_id power_well_id);
 
-static struct i915_power_well *
-lookup_power_well(struct drm_i915_private *dev_priv,
-                 enum i915_power_well_id power_well_id);
-
 const char *
 intel_display_power_domain_str(enum intel_display_power_domain domain)
 {
@@ -159,17 +155,17 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
 static void intel_power_well_enable(struct drm_i915_private *dev_priv,
                                    struct i915_power_well *power_well)
 {
-       DRM_DEBUG_KMS("enabling %s\n", power_well->name);
-       power_well->ops->enable(dev_priv, power_well);
+       DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name);
+       power_well->desc->ops->enable(dev_priv, power_well);
        power_well->hw_enabled = true;
 }
 
 static void intel_power_well_disable(struct drm_i915_private *dev_priv,
                                     struct i915_power_well *power_well)
 {
-       DRM_DEBUG_KMS("disabling %s\n", power_well->name);
+       DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name);
        power_well->hw_enabled = false;
-       power_well->ops->disable(dev_priv, power_well);
+       power_well->desc->ops->disable(dev_priv, power_well);
 }
 
 static void intel_power_well_get(struct drm_i915_private *dev_priv,
@@ -183,7 +179,7 @@ static void intel_power_well_put(struct drm_i915_private *dev_priv,
                                 struct i915_power_well *power_well)
 {
        WARN(!power_well->count, "Use count on power well %s is already zero",
-            power_well->name);
+            power_well->desc->name);
 
        if (!--power_well->count)
                intel_power_well_disable(dev_priv, power_well);
@@ -213,7 +209,7 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
        is_enabled = true;
 
        for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) {
-               if (power_well->always_on)
+               if (power_well->desc->always_on)
                        continue;
 
                if (!power_well->hw_enabled) {
@@ -257,30 +253,6 @@ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
        return ret;
 }
 
-/**
- * intel_display_set_init_power - set the initial power domain state
- * @dev_priv: i915 device instance
- * @enable: whether to enable or disable the initial power domain state
- *
- * For simplicity our driver load/unload and system suspend/resume code assumes
- * that all power domains are always enabled. This functions controls the state
- * of this little hack. While the initial power domain state is enabled runtime
- * pm is effectively disabled.
- */
-void intel_display_set_init_power(struct drm_i915_private *dev_priv,
-                                 bool enable)
-{
-       if (dev_priv->power_domains.init_power_on == enable)
-               return;
-
-       if (enable)
-               intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
-       else
-               intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
-
-       dev_priv->power_domains.init_power_on = enable;
-}
-
 /*
  * Starting with Haswell, we have a "Power Down Well" that can be turned off
  * when not needed anymore. We have 4 registers that can request the power well
@@ -323,26 +295,29 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
 static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
 {
-       enum i915_power_well_id id = power_well->id;
+       const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+       int pw_idx = power_well->desc->hsw.idx;
 
        /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
        WARN_ON(intel_wait_for_register(dev_priv,
-                                       HSW_PWR_WELL_CTL_DRIVER(id),
-                                       HSW_PWR_WELL_CTL_STATE(id),
-                                       HSW_PWR_WELL_CTL_STATE(id),
+                                       regs->driver,
+                                       HSW_PWR_WELL_CTL_STATE(pw_idx),
+                                       HSW_PWR_WELL_CTL_STATE(pw_idx),
                                        1));
 }
 
 static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
-                                    enum i915_power_well_id id)
+                                    const struct i915_power_well_regs *regs,
+                                    int pw_idx)
 {
-       u32 req_mask = HSW_PWR_WELL_CTL_REQ(id);
+       u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
        u32 ret;
 
-       ret = I915_READ(HSW_PWR_WELL_CTL_BIOS(id)) & req_mask ? 1 : 0;
-       ret |= I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) & req_mask ? 2 : 0;
-       ret |= I915_READ(HSW_PWR_WELL_CTL_KVMR) & req_mask ? 4 : 0;
-       ret |= I915_READ(HSW_PWR_WELL_CTL_DEBUG(id)) & req_mask ? 8 : 0;
+       ret = I915_READ(regs->bios) & req_mask ? 1 : 0;
+       ret |= I915_READ(regs->driver) & req_mask ? 2 : 0;
+       if (regs->kvmr.reg)
+               ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0;
+       ret |= I915_READ(regs->debug) & req_mask ? 8 : 0;
 
        return ret;
 }
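
Note on the hunk above: the requester helper now takes the register bank explicitly, and the KVMR read is guarded by "if (regs->kvmr.reg)" because some banks (the ICL AUX/DDI banks defined further down) have no KVMR control register and leave .kvmr zero-initialized. The return value is a four-bit summary — bios:1, driver:2, kvmr:4, debug:8 — decoded by the disable path below. A minimal sketch of that decode, assuming only the bit layout established here (helper name illustrative, not part of the patch):

static void decode_pw_requesters(u32 reqs)
{
	/* bit layout matches hsw_power_well_requesters() above */
	DRM_DEBUG_KMS("bios:%d driver:%d kvmr:%d debug:%d\n",
		      !!(reqs & 1), !!(reqs & 2),
		      !!(reqs & 4), !!(reqs & 8));
}
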
@@ -350,7 +325,8 @@ static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
 static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
                                            struct i915_power_well *power_well)
 {
-       enum i915_power_well_id id = power_well->id;
+       const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+       int pw_idx = power_well->desc->hsw.idx;
        bool disabled;
        u32 reqs;
 
@@ -363,14 +339,14 @@ static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
         * Skip the wait in case any of the request bits are set and print a
         * diagnostic message.
         */
-       wait_for((disabled = !(I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) &
-                              HSW_PWR_WELL_CTL_STATE(id))) ||
-                (reqs = hsw_power_well_requesters(dev_priv, id)), 1);
+       wait_for((disabled = !(I915_READ(regs->driver) &
+                              HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
+                (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
        if (disabled)
                return;
 
        DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
-                     power_well->name,
+                     power_well->desc->name,
                      !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
 }
 
@@ -386,14 +362,15 @@ static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
 static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
                                  struct i915_power_well *power_well)
 {
-       enum i915_power_well_id id = power_well->id;
-       bool wait_fuses = power_well->hsw.has_fuses;
+       const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+       int pw_idx = power_well->desc->hsw.idx;
+       bool wait_fuses = power_well->desc->hsw.has_fuses;
        enum skl_power_gate uninitialized_var(pg);
        u32 val;
 
        if (wait_fuses) {
-               pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_TO_PG(id) :
-                                                SKL_PW_TO_PG(id);
+               pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
+                                                SKL_PW_CTL_IDX_TO_PG(pw_idx);
                /*
                 * For PW1 we have to wait both for the PW0/PG0 fuse state
                 * before enabling the power well and PW1/PG1's own fuse
@@ -405,52 +382,55 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
                        gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
        }
 
-       val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
-       I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), val | HSW_PWR_WELL_CTL_REQ(id));
+       val = I915_READ(regs->driver);
+       I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
        hsw_wait_for_power_well_enable(dev_priv, power_well);
 
        /* Display WA #1178: cnl */
        if (IS_CANNONLAKE(dev_priv) &&
-           (id == CNL_DISP_PW_AUX_B || id == CNL_DISP_PW_AUX_C ||
-            id == CNL_DISP_PW_AUX_D || id == CNL_DISP_PW_AUX_F)) {
-               val = I915_READ(CNL_AUX_ANAOVRD1(id));
+           pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
+           pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
+               val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx));
                val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
-               I915_WRITE(CNL_AUX_ANAOVRD1(id), val);
+               I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val);
        }
 
        if (wait_fuses)
                gen9_wait_for_power_well_fuses(dev_priv, pg);
 
-       hsw_power_well_post_enable(dev_priv, power_well->hsw.irq_pipe_mask,
-                                  power_well->hsw.has_vga);
+       hsw_power_well_post_enable(dev_priv,
+                                  power_well->desc->hsw.irq_pipe_mask,
+                                  power_well->desc->hsw.has_vga);
 }
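
The Display WA #1178 hunk above replaces the explicit id list (AUX B/C/D/F) with a bounds check on the control-register index. That only works if the CNL AUX wells occupy a contiguous PW_CTL index range, which is assumed here from the i915_reg.h of this era; a sketch of the predicate under that assumption (helper name illustrative):

static bool cnl_aux_pw_needs_anaovrd_wa(int pw_idx)
{
	/* GLK_PW_CTL_IDX_AUX_B .. CNL_PW_CTL_IDX_AUX_F assumed contiguous */
	return pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
	       pw_idx <= CNL_PW_CTL_IDX_AUX_F;
}
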
 
 static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
 {
-       enum i915_power_well_id id = power_well->id;
+       const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+       int pw_idx = power_well->desc->hsw.idx;
        u32 val;
 
-       hsw_power_well_pre_disable(dev_priv, power_well->hsw.irq_pipe_mask);
+       hsw_power_well_pre_disable(dev_priv,
+                                  power_well->desc->hsw.irq_pipe_mask);
 
-       val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
-       I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id),
-                  val & ~HSW_PWR_WELL_CTL_REQ(id));
+       val = I915_READ(regs->driver);
+       I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
        hsw_wait_for_power_well_disable(dev_priv, power_well);
 }
 
-#define ICL_AUX_PW_TO_PORT(pw) ((pw) - ICL_DISP_PW_AUX_A)
+#define ICL_AUX_PW_TO_PORT(pw_idx)     ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
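
The macro now maps the AUX well's control-register index, rather than its power well id, to a port. A worked example, assuming ICL_PW_CTL_IDX_AUX_A lines up with PORT_A == 0 and the AUX indices are contiguous:

	ICL_AUX_PW_TO_PORT(ICL_PW_CTL_IDX_AUX_B)
		== ICL_PW_CTL_IDX_AUX_B - ICL_PW_CTL_IDX_AUX_A
		== 1 == PORT_B
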
 
 static void
 icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
                                    struct i915_power_well *power_well)
 {
-       enum i915_power_well_id id = power_well->id;
-       enum port port = ICL_AUX_PW_TO_PORT(id);
+       const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+       int pw_idx = power_well->desc->hsw.idx;
+       enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
        u32 val;
 
-       val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
-       I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), val | HSW_PWR_WELL_CTL_REQ(id));
+       val = I915_READ(regs->driver);
+       I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
 
        val = I915_READ(ICL_PORT_CL_DW12(port));
        I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);
@@ -462,16 +442,16 @@ static void
 icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
                                     struct i915_power_well *power_well)
 {
-       enum i915_power_well_id id = power_well->id;
-       enum port port = ICL_AUX_PW_TO_PORT(id);
+       const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+       int pw_idx = power_well->desc->hsw.idx;
+       enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
        u32 val;
 
        val = I915_READ(ICL_PORT_CL_DW12(port));
        I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX);
 
-       val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
-       I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id),
-                  val & ~HSW_PWR_WELL_CTL_REQ(id));
+       val = I915_READ(regs->driver);
+       I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
 
        hsw_wait_for_power_well_disable(dev_priv, power_well);
 }
@@ -484,22 +464,22 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
 {
-       enum i915_power_well_id id = power_well->id;
-       u32 mask = HSW_PWR_WELL_CTL_REQ(id) | HSW_PWR_WELL_CTL_STATE(id);
+       const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+       int pw_idx = power_well->desc->hsw.idx;
+       u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
+                  HSW_PWR_WELL_CTL_STATE(pw_idx);
 
-       return (I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) & mask) == mask;
+       return (I915_READ(regs->driver) & mask) == mask;
 }
 
 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
 {
-       enum i915_power_well_id id = SKL_DISP_PW_2;
-
        WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
                  "DC9 already programmed to be enabled.\n");
        WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
                  "DC5 still not disabled to enable DC9.\n");
-       WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) &
-                 HSW_PWR_WELL_CTL_REQ(id),
+       WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) &
+                 HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
                  "Power well 2 on.\n");
        WARN_ONCE(intel_irqs_enabled(dev_priv),
                  "Interrupts not disabled yet.\n");
@@ -668,6 +648,27 @@ static void assert_csr_loaded(struct drm_i915_private *dev_priv)
        WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
 }
 
+static struct i915_power_well *
+lookup_power_well(struct drm_i915_private *dev_priv,
+                 enum i915_power_well_id power_well_id)
+{
+       struct i915_power_well *power_well;
+
+       for_each_power_well(dev_priv, power_well)
+               if (power_well->desc->id == power_well_id)
+                       return power_well;
+
+       /*
+        * It's not feasible to add error checking to the callers, since this
+        * condition really shouldn't happen and it doesn't make sense to
+        * abort things like display initialization sequences. Just return
+        * the first power well and hope the WARN gets reported so we can
+        * fix our driver.
+        */
+       WARN(1, "Power well %d not defined for this platform\n", power_well_id);
+       return &dev_priv->power_domains.power_wells[0];
+}
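
lookup_power_well() is moved earlier in the file and its contract changes: instead of returning NULL for an unknown id it WARNs and falls back to the first power well, so callers may dereference the result unconditionally. A minimal caller sketch under that contract, mirroring intel_display_power_well_is_enabled() further down (function name illustrative):

static bool example_pw_is_enabled(struct drm_i915_private *dev_priv,
				  enum i915_power_well_id id)
{
	struct i915_power_well *power_well = lookup_power_well(dev_priv, id);

	/* never NULL thanks to the fallback above, so no check needed */
	return power_well->desc->ops->is_enabled(dev_priv, power_well);
}
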
+
 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
 {
        bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
@@ -723,54 +724,57 @@ static void skl_enable_dc6(struct drm_i915_private *dev_priv)
 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
 {
-       enum i915_power_well_id id = power_well->id;
-       u32 mask = HSW_PWR_WELL_CTL_REQ(id);
-       u32 bios_req = I915_READ(HSW_PWR_WELL_CTL_BIOS(id));
+       const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+       int pw_idx = power_well->desc->hsw.idx;
+       u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
+       u32 bios_req = I915_READ(regs->bios);
 
        /* Take over the request bit if set by BIOS. */
        if (bios_req & mask) {
-               u32 drv_req = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
+               u32 drv_req = I915_READ(regs->driver);
 
                if (!(drv_req & mask))
-                       I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), drv_req | mask);
-               I915_WRITE(HSW_PWR_WELL_CTL_BIOS(id), bios_req & ~mask);
+                       I915_WRITE(regs->driver, drv_req | mask);
+               I915_WRITE(regs->bios, bios_req & ~mask);
        }
 }
 
 static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
 {
-       bxt_ddi_phy_init(dev_priv, power_well->bxt.phy);
+       bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
 }
 
 static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
                                            struct i915_power_well *power_well)
 {
-       bxt_ddi_phy_uninit(dev_priv, power_well->bxt.phy);
+       bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
 }
 
 static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
                                            struct i915_power_well *power_well)
 {
-       return bxt_ddi_phy_is_enabled(dev_priv, power_well->bxt.phy);
+       return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
 }
 
 static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
 {
        struct i915_power_well *power_well;
 
-       power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
+       power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
        if (power_well->count > 0)
-               bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy);
+               bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
 
-       power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
+       power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
        if (power_well->count > 0)
-               bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy);
+               bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
 
        if (IS_GEMINILAKE(dev_priv)) {
-               power_well = lookup_power_well(dev_priv, GLK_DPIO_CMN_C);
+               power_well = lookup_power_well(dev_priv,
+                                              GLK_DISP_PW_DPIO_CMN_C);
                if (power_well->count > 0)
-                       bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy);
+                       bxt_ddi_phy_verify_state(dev_priv,
+                                                power_well->desc->bxt.phy);
        }
 }
 
@@ -869,14 +873,14 @@ static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
 static void vlv_set_power_well(struct drm_i915_private *dev_priv,
                               struct i915_power_well *power_well, bool enable)
 {
-       enum i915_power_well_id power_well_id = power_well->id;
+       int pw_idx = power_well->desc->vlv.idx;
        u32 mask;
        u32 state;
        u32 ctrl;
 
-       mask = PUNIT_PWRGT_MASK(power_well_id);
-       state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
-                        PUNIT_PWRGT_PWR_GATE(power_well_id);
+       mask = PUNIT_PWRGT_MASK(pw_idx);
+       state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
+                        PUNIT_PWRGT_PWR_GATE(pw_idx);
 
        mutex_lock(&dev_priv->pcu_lock);
 
@@ -917,14 +921,14 @@ static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
 static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
 {
-       enum i915_power_well_id power_well_id = power_well->id;
+       int pw_idx = power_well->desc->vlv.idx;
        bool enabled = false;
        u32 mask;
        u32 state;
        u32 ctrl;
 
-       mask = PUNIT_PWRGT_MASK(power_well_id);
-       ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
+       mask = PUNIT_PWRGT_MASK(pw_idx);
+       ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
 
        mutex_lock(&dev_priv->pcu_lock);
 
@@ -933,8 +937,8 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
         * We only ever set the power-on and power-gate states, anything
         * else is unexpected.
         */
-       WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
-               state != PUNIT_PWRGT_PWR_GATE(power_well_id));
+       WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
+               state != PUNIT_PWRGT_PWR_GATE(pw_idx));
        if (state == ctrl)
                enabled = true;
 
@@ -1045,8 +1049,6 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
                                          struct i915_power_well *power_well)
 {
-       WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);
-
        vlv_set_power_well(dev_priv, power_well, true);
 
        vlv_display_power_well_init(dev_priv);
@@ -1055,8 +1057,6 @@ static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
 {
-       WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);
-
        vlv_display_power_well_deinit(dev_priv);
 
        vlv_set_power_well(dev_priv, power_well, false);
@@ -1065,8 +1065,6 @@ static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
 static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
 {
-       WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);
-
        /* since ref/cri clock was enabled */
        udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
 
@@ -1091,8 +1089,6 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
 {
        enum pipe pipe;
 
-       WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);
-
        for_each_pipe(dev_priv, pipe)
                assert_pll_disabled(dev_priv, pipe);
 
@@ -1104,32 +1100,14 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
 
 #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
 
-static struct i915_power_well *
-lookup_power_well(struct drm_i915_private *dev_priv,
-                 enum i915_power_well_id power_well_id)
-{
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
-       int i;
-
-       for (i = 0; i < power_domains->power_well_count; i++) {
-               struct i915_power_well *power_well;
-
-               power_well = &power_domains->power_wells[i];
-               if (power_well->id == power_well_id)
-                       return power_well;
-       }
-
-       return NULL;
-}
-
 #define BITS_SET(val, bits) (((val) & (bits)) == (bits))
 
 static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
 {
        struct i915_power_well *cmn_bc =
-               lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
+               lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
        struct i915_power_well *cmn_d =
-               lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
+               lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
        u32 phy_control = dev_priv->chv_phy_control;
        u32 phy_status = 0;
        u32 phy_status_mask = 0xffffffff;
@@ -1154,7 +1132,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
 
-       if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
+       if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
                phy_status |= PHY_POWERGOOD(DPIO_PHY0);
 
                /* this assumes override is only used to enable lanes */
@@ -1195,7 +1173,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
        }
 
-       if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
+       if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
                phy_status |= PHY_POWERGOOD(DPIO_PHY1);
 
                /* this assumes override is only used to enable lanes */
@@ -1239,10 +1217,10 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
        enum pipe pipe;
        uint32_t tmp;
 
-       WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
-                    power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);
+       WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
+                    power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
 
-       if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+       if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
                pipe = PIPE_A;
                phy = DPIO_PHY0;
        } else {
@@ -1270,7 +1248,7 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
                DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
 
-       if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+       if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
                tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
                tmp |= DPIO_DYNPWRDOWNEN_CH1;
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
@@ -1301,10 +1279,10 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
 {
        enum dpio_phy phy;
 
-       WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
-                    power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);
+       WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
+                    power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
 
-       if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+       if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
                phy = DPIO_PHY0;
                assert_pll_disabled(dev_priv, PIPE_A);
                assert_pll_disabled(dev_priv, PIPE_B);
@@ -1516,8 +1494,6 @@ out:
 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
                                       struct i915_power_well *power_well)
 {
-       WARN_ON_ONCE(power_well->id != CHV_DISP_PW_PIPE_A);
-
        chv_set_pipe_power_well(dev_priv, power_well, true);
 
        vlv_display_power_well_init(dev_priv);
@@ -1526,8 +1502,6 @@ static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
                                        struct i915_power_well *power_well)
 {
-       WARN_ON_ONCE(power_well->id != CHV_DISP_PW_PIPE_A);
-
        vlv_display_power_well_deinit(dev_priv);
 
        chv_set_pipe_power_well(dev_priv, power_well, false);
@@ -2063,13 +2037,13 @@ static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
        .is_enabled = vlv_power_well_enabled,
 };
 
-static struct i915_power_well i9xx_always_on_power_well[] = {
+static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
-               .id = I915_DISP_PW_ALWAYS_ON,
+               .id = DISP_PW_ID_NONE,
        },
 };
 
@@ -2080,19 +2054,19 @@ static const struct i915_power_well_ops i830_pipes_power_well_ops = {
        .is_enabled = i830_pipes_power_well_enabled,
 };
 
-static struct i915_power_well i830_power_wells[] = {
+static const struct i915_power_well_desc i830_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
-               .id = I915_DISP_PW_ALWAYS_ON,
+               .id = DISP_PW_ID_NONE,
        },
        {
                .name = "pipes",
                .domains = I830_PIPES_POWER_DOMAINS,
                .ops = &i830_pipes_power_well_ops,
-               .id = I830_DISP_PW_PIPES,
+               .id = DISP_PW_ID_NONE,
        },
 };
 
@@ -2117,13 +2091,20 @@ static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
        .is_enabled = bxt_dpio_cmn_power_well_enabled,
 };
 
-static struct i915_power_well hsw_power_wells[] = {
+static const struct i915_power_well_regs hsw_power_well_regs = {
+       .bios   = HSW_PWR_WELL_CTL1,
+       .driver = HSW_PWR_WELL_CTL2,
+       .kvmr   = HSW_PWR_WELL_CTL3,
+       .debug  = HSW_PWR_WELL_CTL4,
+};
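
This table is the heart of the refactor: instead of deriving each control register from the power well id via macros like HSW_PWR_WELL_CTL_DRIVER(id), every descriptor now points at a const bank of BIOS/driver/KVMR/debug registers and carries a pw_idx selecting the REQ/STATE bits within them. A minimal sketch of the resulting request pattern, matching the hsw_power_well_enable()/disable() hunks above (helper name illustrative):

static void hsw_pw_set_request(struct drm_i915_private *dev_priv,
			       const struct i915_power_well_regs *regs,
			       int pw_idx, bool enable)
{
	u32 val = I915_READ(regs->driver);

	if (enable)
		val |= HSW_PWR_WELL_CTL_REQ(pw_idx);
	else
		val &= ~HSW_PWR_WELL_CTL_REQ(pw_idx);

	I915_WRITE(regs->driver, val);
}
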
+
+static const struct i915_power_well_desc hsw_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
-               .id = I915_DISP_PW_ALWAYS_ON,
+               .id = DISP_PW_ID_NONE,
        },
        {
                .name = "display",
@@ -2131,18 +2112,20 @@ static struct i915_power_well hsw_power_wells[] = {
                .ops = &hsw_power_well_ops,
                .id = HSW_DISP_PW_GLOBAL,
                {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
                        .hsw.has_vga = true,
                },
        },
 };
 
-static struct i915_power_well bdw_power_wells[] = {
+static const struct i915_power_well_desc bdw_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
-               .id = I915_DISP_PW_ALWAYS_ON,
+               .id = DISP_PW_ID_NONE,
        },
        {
                .name = "display",
@@ -2150,6 +2133,8 @@ static struct i915_power_well bdw_power_wells[] = {
                .ops = &hsw_power_well_ops,
                .id = HSW_DISP_PW_GLOBAL,
                {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
                        .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
                        .hsw.has_vga = true,
                },
@@ -2177,19 +2162,22 @@ static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
        .is_enabled = vlv_power_well_enabled,
 };
 
-static struct i915_power_well vlv_power_wells[] = {
+static const struct i915_power_well_desc vlv_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
-               .id = I915_DISP_PW_ALWAYS_ON,
+               .id = DISP_PW_ID_NONE,
        },
        {
                .name = "display",
                .domains = VLV_DISPLAY_POWER_DOMAINS,
-               .id = PUNIT_POWER_WELL_DISP2D,
                .ops = &vlv_display_power_well_ops,
+               .id = VLV_DISP_PW_DISP2D,
+               {
+                       .vlv.idx = PUNIT_PWGT_IDX_DISP2D,
+               },
        },
        {
                .name = "dpio-tx-b-01",
@@ -2198,7 +2186,10 @@ static struct i915_power_well vlv_power_wells[] = {
                           VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
                           VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
                .ops = &vlv_dpio_power_well_ops,
-               .id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
+               },
        },
        {
                .name = "dpio-tx-b-23",
@@ -2207,7 +2198,10 @@ static struct i915_power_well vlv_power_wells[] = {
                           VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
                           VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
                .ops = &vlv_dpio_power_well_ops,
-               .id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
+               },
        },
        {
                .name = "dpio-tx-c-01",
@@ -2216,7 +2210,10 @@ static struct i915_power_well vlv_power_wells[] = {
                           VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
                           VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
                .ops = &vlv_dpio_power_well_ops,
-               .id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
+               },
        },
        {
                .name = "dpio-tx-c-23",
@@ -2225,23 +2222,29 @@ static struct i915_power_well vlv_power_wells[] = {
                           VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
                           VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
                .ops = &vlv_dpio_power_well_ops,
-               .id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
+               },
        },
        {
                .name = "dpio-common",
                .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
-               .id = PUNIT_POWER_WELL_DPIO_CMN_BC,
                .ops = &vlv_dpio_cmn_power_well_ops,
+               .id = VLV_DISP_PW_DPIO_CMN_BC,
+               {
+                       .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
+               },
        },
 };
 
-static struct i915_power_well chv_power_wells[] = {
+static const struct i915_power_well_desc chv_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
-               .id = I915_DISP_PW_ALWAYS_ON,
+               .id = DISP_PW_ID_NONE,
        },
        {
                .name = "display",
@@ -2251,20 +2254,26 @@ static struct i915_power_well chv_power_wells[] = {
                 * required for any pipe to work.
                 */
                .domains = CHV_DISPLAY_POWER_DOMAINS,
-               .id = CHV_DISP_PW_PIPE_A,
                .ops = &chv_pipe_power_well_ops,
+               .id = DISP_PW_ID_NONE,
        },
        {
                .name = "dpio-common-bc",
                .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
-               .id = PUNIT_POWER_WELL_DPIO_CMN_BC,
                .ops = &chv_dpio_cmn_power_well_ops,
+               .id = VLV_DISP_PW_DPIO_CMN_BC,
+               {
+                       .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
+               },
        },
        {
                .name = "dpio-common-d",
                .domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
-               .id = PUNIT_POWER_WELL_DPIO_CMN_D,
                .ops = &chv_dpio_cmn_power_well_ops,
+               .id = CHV_DISP_PW_DPIO_CMN_D,
+               {
+                       .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
+               },
        },
 };
 
@@ -2275,18 +2284,18 @@ bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
        bool ret;
 
        power_well = lookup_power_well(dev_priv, power_well_id);
-       ret = power_well->ops->is_enabled(dev_priv, power_well);
+       ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
 
        return ret;
 }
 
-static struct i915_power_well skl_power_wells[] = {
+static const struct i915_power_well_desc skl_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
-               .id = I915_DISP_PW_ALWAYS_ON,
+               .id = DISP_PW_ID_NONE,
        },
        {
                .name = "power well 1",
@@ -2295,6 +2304,8 @@ static struct i915_power_well skl_power_wells[] = {
                .ops = &hsw_power_well_ops,
                .id = SKL_DISP_PW_1,
                {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_PW_1,
                        .hsw.has_fuses = true,
                },
        },
@@ -2304,12 +2315,16 @@ static struct i915_power_well skl_power_wells[] = {
                .domains = 0,
                .ops = &hsw_power_well_ops,
                .id = SKL_DISP_PW_MISC_IO,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
+               },
        },
        {
                .name = "DC off",
                .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
                .ops = &gen9_dc_off_power_well_ops,
-               .id = SKL_DISP_PW_DC_OFF,
+               .id = DISP_PW_ID_NONE,
        },
        {
                .name = "power well 2",
@@ -2317,6 +2332,8 @@ static struct i915_power_well skl_power_wells[] = {
                .ops = &hsw_power_well_ops,
                .id = SKL_DISP_PW_2,
                {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_PW_2,
                        .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
                        .hsw.has_vga = true,
                        .hsw.has_fuses = true,
@@ -2326,35 +2343,51 @@ static struct i915_power_well skl_power_wells[] = {
                .name = "DDI A/E IO power well",
                .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = SKL_DISP_PW_DDI_A_E,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
+               },
        },
        {
                .name = "DDI B IO power well",
                .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = SKL_DISP_PW_DDI_B,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
+               },
        },
        {
                .name = "DDI C IO power well",
                .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = SKL_DISP_PW_DDI_C,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
+               },
        },
        {
                .name = "DDI D IO power well",
                .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = SKL_DISP_PW_DDI_D,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
+               },
        },
 };
 
-static struct i915_power_well bxt_power_wells[] = {
+static const struct i915_power_well_desc bxt_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
-               .id = I915_DISP_PW_ALWAYS_ON,
+               .id = DISP_PW_ID_NONE,
        },
        {
                .name = "power well 1",
@@ -2362,6 +2395,8 @@ static struct i915_power_well bxt_power_wells[] = {
                .ops = &hsw_power_well_ops,
                .id = SKL_DISP_PW_1,
                {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_PW_1,
                        .hsw.has_fuses = true,
                },
        },
@@ -2369,7 +2404,7 @@ static struct i915_power_well bxt_power_wells[] = {
                .name = "DC off",
                .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
                .ops = &gen9_dc_off_power_well_ops,
-               .id = SKL_DISP_PW_DC_OFF,
+               .id = DISP_PW_ID_NONE,
        },
        {
                .name = "power well 2",
@@ -2377,6 +2412,8 @@ static struct i915_power_well bxt_power_wells[] = {
                .ops = &hsw_power_well_ops,
                .id = SKL_DISP_PW_2,
                {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_PW_2,
                        .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
                        .hsw.has_vga = true,
                        .hsw.has_fuses = true,
@@ -2386,7 +2423,7 @@ static struct i915_power_well bxt_power_wells[] = {
                .name = "dpio-common-a",
                .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
                .ops = &bxt_dpio_cmn_power_well_ops,
-               .id = BXT_DPIO_CMN_A,
+               .id = BXT_DISP_PW_DPIO_CMN_A,
                {
                        .bxt.phy = DPIO_PHY1,
                },
@@ -2395,20 +2432,20 @@ static struct i915_power_well bxt_power_wells[] = {
                .name = "dpio-common-bc",
                .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
                .ops = &bxt_dpio_cmn_power_well_ops,
-               .id = BXT_DPIO_CMN_BC,
+               .id = VLV_DISP_PW_DPIO_CMN_BC,
                {
                        .bxt.phy = DPIO_PHY0,
                },
        },
 };
 
-static struct i915_power_well glk_power_wells[] = {
+static const struct i915_power_well_desc glk_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
-               .id = I915_DISP_PW_ALWAYS_ON,
+               .id = DISP_PW_ID_NONE,
        },
        {
                .name = "power well 1",
@@ -2417,6 +2454,8 @@ static struct i915_power_well glk_power_wells[] = {
                .ops = &hsw_power_well_ops,
                .id = SKL_DISP_PW_1,
                {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_PW_1,
                        .hsw.has_fuses = true,
                },
        },
@@ -2424,7 +2463,7 @@ static struct i915_power_well glk_power_wells[] = {
                .name = "DC off",
                .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
                .ops = &gen9_dc_off_power_well_ops,
-               .id = SKL_DISP_PW_DC_OFF,
+               .id = DISP_PW_ID_NONE,
        },
        {
                .name = "power well 2",
@@ -2432,6 +2471,8 @@ static struct i915_power_well glk_power_wells[] = {
                .ops = &hsw_power_well_ops,
                .id = SKL_DISP_PW_2,
                {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_PW_2,
                        .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
                        .hsw.has_vga = true,
                        .hsw.has_fuses = true,
@@ -2441,7 +2482,7 @@ static struct i915_power_well glk_power_wells[] = {
                .name = "dpio-common-a",
                .domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
                .ops = &bxt_dpio_cmn_power_well_ops,
-               .id = BXT_DPIO_CMN_A,
+               .id = BXT_DISP_PW_DPIO_CMN_A,
                {
                        .bxt.phy = DPIO_PHY1,
                },
@@ -2450,7 +2491,7 @@ static struct i915_power_well glk_power_wells[] = {
                .name = "dpio-common-b",
                .domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
                .ops = &bxt_dpio_cmn_power_well_ops,
-               .id = BXT_DPIO_CMN_BC,
+               .id = VLV_DISP_PW_DPIO_CMN_BC,
                {
                        .bxt.phy = DPIO_PHY0,
                },
@@ -2459,7 +2500,7 @@ static struct i915_power_well glk_power_wells[] = {
                .name = "dpio-common-c",
                .domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
                .ops = &bxt_dpio_cmn_power_well_ops,
-               .id = GLK_DPIO_CMN_C,
+               .id = GLK_DISP_PW_DPIO_CMN_C,
                {
                        .bxt.phy = DPIO_PHY2,
                },
@@ -2468,47 +2509,71 @@ static struct i915_power_well glk_power_wells[] = {
                .name = "AUX A",
                .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = GLK_DISP_PW_AUX_A,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
+               },
        },
        {
                .name = "AUX B",
                .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = GLK_DISP_PW_AUX_B,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
+               },
        },
        {
                .name = "AUX C",
                .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = GLK_DISP_PW_AUX_C,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
+               },
        },
        {
                .name = "DDI A IO power well",
                .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = GLK_DISP_PW_DDI_A,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
+               },
        },
        {
                .name = "DDI B IO power well",
                .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = SKL_DISP_PW_DDI_B,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
+               },
        },
        {
                .name = "DDI C IO power well",
                .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = SKL_DISP_PW_DDI_C,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
+               },
        },
 };
 
-static struct i915_power_well cnl_power_wells[] = {
+static const struct i915_power_well_desc cnl_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
-               .id = I915_DISP_PW_ALWAYS_ON,
+               .id = DISP_PW_ID_NONE,
        },
        {
                .name = "power well 1",
@@ -2517,6 +2582,8 @@ static struct i915_power_well cnl_power_wells[] = {
                .ops = &hsw_power_well_ops,
                .id = SKL_DISP_PW_1,
                {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_PW_1,
                        .hsw.has_fuses = true,
                },
        },
@@ -2524,31 +2591,47 @@ static struct i915_power_well cnl_power_wells[] = {
                .name = "AUX A",
                .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = CNL_DISP_PW_AUX_A,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
+               },
        },
        {
                .name = "AUX B",
                .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = CNL_DISP_PW_AUX_B,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
+               },
        },
        {
                .name = "AUX C",
                .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = CNL_DISP_PW_AUX_C,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
+               },
        },
        {
                .name = "AUX D",
                .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = CNL_DISP_PW_AUX_D,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = CNL_PW_CTL_IDX_AUX_D,
+               },
        },
        {
                .name = "DC off",
                .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
                .ops = &gen9_dc_off_power_well_ops,
-               .id = SKL_DISP_PW_DC_OFF,
+               .id = DISP_PW_ID_NONE,
        },
        {
                .name = "power well 2",
@@ -2556,6 +2639,8 @@ static struct i915_power_well cnl_power_wells[] = {
                .ops = &hsw_power_well_ops,
                .id = SKL_DISP_PW_2,
                {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_PW_2,
                        .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
                        .hsw.has_vga = true,
                        .hsw.has_fuses = true,
@@ -2565,37 +2650,61 @@ static struct i915_power_well cnl_power_wells[] = {
                .name = "DDI A IO power well",
                .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = CNL_DISP_PW_DDI_A,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
+               },
        },
        {
                .name = "DDI B IO power well",
                .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = SKL_DISP_PW_DDI_B,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
+               },
        },
        {
                .name = "DDI C IO power well",
                .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = SKL_DISP_PW_DDI_C,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
+               },
        },
        {
                .name = "DDI D IO power well",
                .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = SKL_DISP_PW_DDI_D,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
+               },
        },
        {
                .name = "DDI F IO power well",
                .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = CNL_DISP_PW_DDI_F,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = CNL_PW_CTL_IDX_DDI_F,
+               },
        },
        {
                .name = "AUX F",
                .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = CNL_DISP_PW_AUX_F,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = CNL_PW_CTL_IDX_AUX_F,
+               },
        },
 };
 
@@ -2606,147 +2715,239 @@ static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
        .is_enabled = hsw_power_well_enabled,
 };
 
-static struct i915_power_well icl_power_wells[] = {
+static const struct i915_power_well_regs icl_aux_power_well_regs = {
+       .bios   = ICL_PWR_WELL_CTL_AUX1,
+       .driver = ICL_PWR_WELL_CTL_AUX2,
+       .debug  = ICL_PWR_WELL_CTL_AUX4,
+};
+
+static const struct i915_power_well_regs icl_ddi_power_well_regs = {
+       .bios   = ICL_PWR_WELL_CTL_DDI1,
+       .driver = ICL_PWR_WELL_CTL_DDI2,
+       .debug  = ICL_PWR_WELL_CTL_DDI4,
+};
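
Note that ICL splits the AUX and DDI wells into register banks of their own, and neither bank initializes .kvmr: with designated initializers the omitted field is zero, which is exactly what the guard in hsw_power_well_requesters() tests for. A restatement of that guard, assuming i915_reg_t wraps a raw MMIO offset in .reg:

	if (regs->kvmr.reg)	/* non-zero offset => register exists */
		ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0;
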
+
+static const struct i915_power_well_desc icl_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
-               .id = I915_DISP_PW_ALWAYS_ON,
+               .id = DISP_PW_ID_NONE,
        },
        {
                .name = "power well 1",
                /* Handled by the DMC firmware */
                .domains = 0,
                .ops = &hsw_power_well_ops,
-               .id = ICL_DISP_PW_1,
-               .hsw.has_fuses = true,
+               .id = SKL_DISP_PW_1,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_PW_1,
+                       .hsw.has_fuses = true,
+               },
        },
        {
                .name = "power well 2",
                .domains = ICL_PW_2_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = ICL_DISP_PW_2,
-               .hsw.has_fuses = true,
+               .id = SKL_DISP_PW_2,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+                       .hsw.has_fuses = true,
+               },
        },
        {
                .name = "DC off",
                .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
                .ops = &gen9_dc_off_power_well_ops,
-               .id = SKL_DISP_PW_DC_OFF,
+               .id = DISP_PW_ID_NONE,
        },
        {
                .name = "power well 3",
                .domains = ICL_PW_3_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = ICL_DISP_PW_3,
-               .hsw.irq_pipe_mask = BIT(PIPE_B),
-               .hsw.has_vga = true,
-               .hsw.has_fuses = true,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_PW_3,
+                       .hsw.irq_pipe_mask = BIT(PIPE_B),
+                       .hsw.has_vga = true,
+                       .hsw.has_fuses = true,
+               },
        },
        {
                .name = "DDI A IO",
                .domains = ICL_DDI_IO_A_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = ICL_DISP_PW_DDI_A,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_ddi_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
+               },
        },
        {
                .name = "DDI B IO",
                .domains = ICL_DDI_IO_B_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = ICL_DISP_PW_DDI_B,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_ddi_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
+               },
        },
        {
                .name = "DDI C IO",
                .domains = ICL_DDI_IO_C_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = ICL_DISP_PW_DDI_C,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_ddi_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
+               },
        },
        {
                .name = "DDI D IO",
                .domains = ICL_DDI_IO_D_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = ICL_DISP_PW_DDI_D,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_ddi_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
+               },
        },
        {
                .name = "DDI E IO",
                .domains = ICL_DDI_IO_E_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = ICL_DISP_PW_DDI_E,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_ddi_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_DDI_E,
+               },
        },
        {
                .name = "DDI F IO",
                .domains = ICL_DDI_IO_F_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = ICL_DISP_PW_DDI_F,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_ddi_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_DDI_F,
+               },
        },
        {
                .name = "AUX A",
                .domains = ICL_AUX_A_IO_POWER_DOMAINS,
                .ops = &icl_combo_phy_aux_power_well_ops,
-               .id = ICL_DISP_PW_AUX_A,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_aux_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
+               },
        },
        {
                .name = "AUX B",
                .domains = ICL_AUX_B_IO_POWER_DOMAINS,
                .ops = &icl_combo_phy_aux_power_well_ops,
-               .id = ICL_DISP_PW_AUX_B,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_aux_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
+               },
        },
        {
                .name = "AUX C",
                .domains = ICL_AUX_C_IO_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = ICL_DISP_PW_AUX_C,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_aux_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
+               },
        },
        {
                .name = "AUX D",
                .domains = ICL_AUX_D_IO_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = ICL_DISP_PW_AUX_D,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_aux_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
+               },
        },
        {
                .name = "AUX E",
                .domains = ICL_AUX_E_IO_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = ICL_DISP_PW_AUX_E,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_aux_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_AUX_E,
+               },
        },
        {
                .name = "AUX F",
                .domains = ICL_AUX_F_IO_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = ICL_DISP_PW_AUX_F,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_aux_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_AUX_F,
+               },
        },
        {
                .name = "AUX TBT1",
                .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = ICL_DISP_PW_AUX_TBT1,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_aux_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
+               },
        },
        {
                .name = "AUX TBT2",
                .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = ICL_DISP_PW_AUX_TBT2,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_aux_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
+               },
        },
        {
                .name = "AUX TBT3",
                .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = ICL_DISP_PW_AUX_TBT3,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_aux_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
+               },
        },
        {
                .name = "AUX TBT4",
                .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = ICL_DISP_PW_AUX_TBT4,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_aux_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
+               },
        },
        {
                .name = "power well 4",
                .domains = ICL_PW_4_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = ICL_DISP_PW_4,
-               .hsw.has_fuses = true,
-               .hsw.irq_pipe_mask = BIT(PIPE_C),
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_PW_4,
+                       .hsw.has_fuses = true,
+                       .hsw.irq_pipe_mask = BIT(PIPE_C),
+               },
        },
 };
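For orientation, the initializers above imply a split between a const descriptor table and small per-device runtime state. A hedged sketch of that layout, reconstructed only from the fields this patch touches (the in-tree definitions carry additional platform-specific members that are elided here):

/* Sketch only: shape inferred from the initializers and desc-> usages
 * in this patch, not copied from the headers. */
struct i915_power_well_desc {
	const char *name;
	bool always_on;
	u64 domains;
	/* unique identifier, or DISP_PW_ID_NONE if never looked up by id */
	enum i915_power_well_id id;
	const struct i915_power_well_ops *ops;
	union {
		struct {
			const struct hsw_power_well_regs *regs;
			u8 idx;
			u8 irq_pipe_mask;
			bool has_fuses;
		} hsw;
	};
};

struct i915_power_well {
	const struct i915_power_well_desc *desc;	/* immutable description */
	int count;					/* runtime reference count */
	bool hw_enabled;				/* cached HW state */
};

Keeping the descriptors const lets every device share one table, while __set_power_wells() below allocates only the mutable part per device.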
 
@@ -2809,26 +3010,41 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
        return mask;
 }
 
-static void assert_power_well_ids_unique(struct drm_i915_private *dev_priv)
+static int
+__set_power_wells(struct i915_power_domains *power_domains,
+                 const struct i915_power_well_desc *power_well_descs,
+                 int power_well_count)
 {
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
-       u64 power_well_ids;
+       u64 power_well_ids = 0;
        int i;
 
-       power_well_ids = 0;
-       for (i = 0; i < power_domains->power_well_count; i++) {
-               enum i915_power_well_id id = power_domains->power_wells[i].id;
+       power_domains->power_well_count = power_well_count;
+       power_domains->power_wells =
+                               kcalloc(power_well_count,
+                                       sizeof(*power_domains->power_wells),
+                                       GFP_KERNEL);
+       if (!power_domains->power_wells)
+               return -ENOMEM;
+
+       for (i = 0; i < power_well_count; i++) {
+               enum i915_power_well_id id = power_well_descs[i].id;
+
+               power_domains->power_wells[i].desc = &power_well_descs[i];
+
+               if (id == DISP_PW_ID_NONE)
+                       continue;
 
                WARN_ON(id >= sizeof(power_well_ids) * 8);
                WARN_ON(power_well_ids & BIT_ULL(id));
                power_well_ids |= BIT_ULL(id);
        }
+
+       return 0;
 }
 
-#define set_power_wells(power_domains, __power_wells) ({               \
-       (power_domains)->power_wells = (__power_wells);                 \
-       (power_domains)->power_well_count = ARRAY_SIZE(__power_wells);  \
-})
+#define set_power_wells(power_domains, __power_well_descs) \
+       __set_power_wells(power_domains, __power_well_descs, \
+                         ARRAY_SIZE(__power_well_descs))
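The BIT_ULL() bookkeeping above guarantees that any well registered with a real id can later be found unambiguously. A minimal sketch of the lookup this enables (the in-tree lookup_power_well() may differ in detail):

static struct i915_power_well *
lookup_power_well(struct drm_i915_private *dev_priv,
		  enum i915_power_well_id power_well_id)
{
	struct i915_power_well *power_well;

	/* Wells registered as DISP_PW_ID_NONE can never match here,
	 * which is why only the real ids need to be unique. */
	for_each_power_well(dev_priv, power_well)
		if (power_well->desc->id == power_well_id)
			return power_well;

	return NULL;
}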
 
 /**
  * intel_power_domains_init - initializes the power domain structures
@@ -2840,6 +3056,7 @@ static void assert_power_well_ids_unique(struct drm_i915_private *dev_priv)
 int intel_power_domains_init(struct drm_i915_private *dev_priv)
 {
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       int err;
 
        i915_modparams.disable_power_well =
                sanitize_disable_power_well_option(dev_priv,
@@ -2856,15 +3073,15 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
         * the disabling order is reversed.
         */
        if (IS_ICELAKE(dev_priv)) {
-               set_power_wells(power_domains, icl_power_wells);
+               err = set_power_wells(power_domains, icl_power_wells);
        } else if (IS_HASWELL(dev_priv)) {
-               set_power_wells(power_domains, hsw_power_wells);
+               err = set_power_wells(power_domains, hsw_power_wells);
        } else if (IS_BROADWELL(dev_priv)) {
-               set_power_wells(power_domains, bdw_power_wells);
+               err = set_power_wells(power_domains, bdw_power_wells);
        } else if (IS_GEN9_BC(dev_priv)) {
-               set_power_wells(power_domains, skl_power_wells);
+               err = set_power_wells(power_domains, skl_power_wells);
        } else if (IS_CANNONLAKE(dev_priv)) {
-               set_power_wells(power_domains, cnl_power_wells);
+               err = set_power_wells(power_domains, cnl_power_wells);
 
                /*
                 * DDI and Aux IO are getting enabled for all ports
@@ -2876,57 +3093,31 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
                        power_domains->power_well_count -= 2;
 
        } else if (IS_BROXTON(dev_priv)) {
-               set_power_wells(power_domains, bxt_power_wells);
+               err = set_power_wells(power_domains, bxt_power_wells);
        } else if (IS_GEMINILAKE(dev_priv)) {
-               set_power_wells(power_domains, glk_power_wells);
+               err = set_power_wells(power_domains, glk_power_wells);
        } else if (IS_CHERRYVIEW(dev_priv)) {
-               set_power_wells(power_domains, chv_power_wells);
+               err = set_power_wells(power_domains, chv_power_wells);
        } else if (IS_VALLEYVIEW(dev_priv)) {
-               set_power_wells(power_domains, vlv_power_wells);
+               err = set_power_wells(power_domains, vlv_power_wells);
        } else if (IS_I830(dev_priv)) {
-               set_power_wells(power_domains, i830_power_wells);
+               err = set_power_wells(power_domains, i830_power_wells);
        } else {
-               set_power_wells(power_domains, i9xx_always_on_power_well);
+               err = set_power_wells(power_domains, i9xx_always_on_power_well);
        }
 
-       assert_power_well_ids_unique(dev_priv);
-
-       return 0;
+       return err;
 }
 
 /**
- * intel_power_domains_fini - finalizes the power domain structures
+ * intel_power_domains_cleanup - clean up power domains resources
  * @dev_priv: i915 device instance
  *
- * Finalizes the power domain structures for @dev_priv depending upon the
- * supported platform. This function also disables runtime pm and ensures that
- * the device stays powered up so that the driver can be reloaded.
+ * Release any resources acquired by intel_power_domains_init().
  */
-void intel_power_domains_fini(struct drm_i915_private *dev_priv)
+void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
 {
-       struct device *kdev = &dev_priv->drm.pdev->dev;
-
-       /*
-        * The i915.ko module is still not prepared to be loaded when
-        * the power well is not enabled, so just enable it in case
-        * we're going to unload/reload.
-        * The following also reacquires the RPM reference the core passed
-        * to the driver during loading, which is dropped in
-        * intel_runtime_pm_enable(). We have to hand back the control of the
-        * device to the core with this reference held.
-        */
-       intel_display_set_init_power(dev_priv, true);
-
-       /* Remove the refcount we took to keep power well support disabled. */
-       if (!i915_modparams.disable_power_well)
-               intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
-
-       /*
-        * Remove the refcount we took in intel_runtime_pm_enable() in case
-        * the platform doesn't support runtime PM.
-        */
-       if (!HAS_RUNTIME_PM(dev_priv))
-               pm_runtime_put(kdev);
+       kfree(dev_priv->power_domains.power_wells);
 }
 
 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
@@ -2936,9 +3127,9 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
 
        mutex_lock(&power_domains->lock);
        for_each_power_well(dev_priv, power_well) {
-               power_well->ops->sync_hw(dev_priv, power_well);
-               power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
-                                                                    power_well);
+               power_well->desc->ops->sync_hw(dev_priv, power_well);
+               power_well->hw_enabled =
+                       power_well->desc->ops->is_enabled(dev_priv, power_well);
        }
        mutex_unlock(&power_domains->lock);
 }
@@ -3360,7 +3551,7 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
         *    The AUX IO power wells will be enabled on demand.
         */
        mutex_lock(&power_domains->lock);
-       well = lookup_power_well(dev_priv, ICL_DISP_PW_1);
+       well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
        intel_power_well_enable(dev_priv, well);
        mutex_unlock(&power_domains->lock);
 
@@ -3372,10 +3563,6 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
 
        /* 7. Setup MBUS. */
        icl_mbus_init(dev_priv);
-
-       /* 8. CHICKEN_DCPR_1 */
-       I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
-                                       CNL_DDI_CLOCK_REG_ACCESS_ON);
 }
 
 static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
@@ -3401,7 +3588,7 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
         *    disabled at this point.
         */
        mutex_lock(&power_domains->lock);
-       well = lookup_power_well(dev_priv, ICL_DISP_PW_1);
+       well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
        intel_power_well_disable(dev_priv, well);
        mutex_unlock(&power_domains->lock);
 
@@ -3416,9 +3603,9 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
 static void chv_phy_control_init(struct drm_i915_private *dev_priv)
 {
        struct i915_power_well *cmn_bc =
-               lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
+               lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
        struct i915_power_well *cmn_d =
-               lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
+               lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
 
        /*
         * DISPLAY_PHY_CONTROL can get corrupted if read. As a
@@ -3441,7 +3628,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
         * override and set the lane powerdown bits according to the
         * current lane status.
         */
-       if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
+       if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
                uint32_t status = I915_READ(DPLL(PIPE_A));
                unsigned int mask;
 
@@ -3472,7 +3659,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
                dev_priv->chv_phy_assert[DPIO_PHY0] = true;
        }
 
-       if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
+       if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
                uint32_t status = I915_READ(DPIO_PHY_STATUS);
                unsigned int mask;
 
@@ -3503,20 +3690,20 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
 {
        struct i915_power_well *cmn =
-               lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
+               lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
        struct i915_power_well *disp2d =
-               lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
+               lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
 
        /* If the display might be already active skip this */
-       if (cmn->ops->is_enabled(dev_priv, cmn) &&
-           disp2d->ops->is_enabled(dev_priv, disp2d) &&
+       if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
+           disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
            I915_READ(DPIO_CTL) & DPIO_CMNRST)
                return;
 
        DRM_DEBUG_KMS("toggling display PHY side reset\n");
 
        /* cmnlane needs DPLL registers */
-       disp2d->ops->enable(dev_priv, disp2d);
+       disp2d->desc->ops->enable(dev_priv, disp2d);
 
        /*
         * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
@@ -3525,9 +3712,11 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
         * Simply ungating isn't enough to reset the PHY enough to get
         * ports and lanes running.
         */
-       cmn->ops->disable(dev_priv, cmn);
+       cmn->desc->ops->disable(dev_priv, cmn);
 }
 
+static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
+
 /**
  * intel_power_domains_init_hw - initialize hardware power domain state
  * @dev_priv: i915 device instance
@@ -3535,9 +3724,14 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
  *
  * This function initializes the hardware power domain state and enables all
  * power wells belonging to the INIT power domain. Power wells in other
- * domains (and not in the INIT domain) are referenced or disabled during the
- * modeset state HW readout. After that the reference count of each power well
- * must match its HW enabled state, see intel_power_domains_verify_state().
+ * domains (and not in the INIT domain) are referenced or disabled by
+ * intel_modeset_readout_hw_state(). After that the reference count of each
+ * power well must match its HW enabled state, see
+ * intel_power_domains_verify_state().
+ *
+ * It will return with power domains disabled (to be enabled later by
+ * intel_power_domains_enable()) and must be paired with
+ * intel_power_domains_fini_hw().
  */
 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
 {
@@ -3563,30 +3757,117 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
                mutex_unlock(&power_domains->lock);
        }
 
-       /* For now, we need the power well to be always enabled. */
-       intel_display_set_init_power(dev_priv, true);
+       /*
+        * Keep all power wells enabled for any dependent HW access during
+        * initialization and to make sure we keep BIOS-enabled display HW
+        * resources powered until display HW readout is complete. We drop
+        * this reference in intel_power_domains_enable().
+        */
+       intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
        /* Disable power support if the user asked so. */
        if (!i915_modparams.disable_power_well)
                intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
        intel_power_domains_sync_hw(dev_priv);
+
        power_domains->initializing = false;
 }
 
+/**
+ * intel_power_domains_fini_hw - deinitialize hw power domain state
+ * @dev_priv: i915 device instance
+ *
+ * De-initializes the display power domain HW state. It also ensures that the
+ * device stays powered up so that the driver can be reloaded.
+ *
+ * It must be called with power domains already disabled (after a call to
+ * intel_power_domains_disable()) and must be paired with
+ * intel_power_domains_init_hw().
+ */
+void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv)
+{
+       /* Keep the power well enabled, but cancel its rpm wakeref. */
+       intel_runtime_pm_put(dev_priv);
+
+       /* Remove the refcount we took to keep power well support disabled. */
+       if (!i915_modparams.disable_power_well)
+               intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+
+       intel_power_domains_verify_state(dev_priv);
+}
+
+/**
+ * intel_power_domains_enable - enable toggling of display power wells
+ * @dev_priv: i915 device instance
+ *
+ * Enable the on-demand enabling/disabling of the display power wells. Note
+ * that power wells not belonging to POWER_DOMAIN_INIT are allowed to be
+ * toggled only at specific points of the display modeset sequence, so they are
+ * not affected by the intel_power_domains_enable()/disable() calls. The
+ * purpose of these functions is to keep the rest of the power wells enabled
+ * until the end of display HW readout (which will acquire the power references
+ * reflecting the current HW state).
+ */
+void intel_power_domains_enable(struct drm_i915_private *dev_priv)
+{
+       intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+
+       intel_power_domains_verify_state(dev_priv);
+}
+
+/**
+ * intel_power_domains_disable - disable toggling of display power wells
+ * @dev_priv: i915 device instance
+ *
+ * Disable the on-demand enabling/disabling of the display power wells. See
+ * intel_power_domains_enable() for which power wells this call controls.
+ */
+void intel_power_domains_disable(struct drm_i915_private *dev_priv)
+{
+       intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+
+       intel_power_domains_verify_state(dev_priv);
+}
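Taken together, the kerneldoc above implies a strict pairing of these entry points across driver load and unload. A sketch of the intended call order, where the example_* wrappers are purely illustrative and all unrelated setup is elided:

/* Hedged sketch of the pairing; not the real i915 entry points. */
static int example_driver_load(struct drm_i915_private *i915)
{
	int err;

	err = intel_power_domains_init(i915);      /* allocate well instances */
	if (err)
		return err;

	intel_power_domains_init_hw(i915, false);  /* all wells on, toggling off */
	/* ... display HW readout takes its own power references ... */
	intel_power_domains_enable(i915);          /* drop init ref, allow toggling */
	return 0;
}

static void example_driver_unload(struct drm_i915_private *i915)
{
	intel_power_domains_disable(i915);         /* retake the init reference */
	intel_power_domains_fini_hw(i915);         /* drop refs, keep device powered */
	intel_power_domains_cleanup(i915);         /* free the well instances */
}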
+
 /**
  * intel_power_domains_suspend - suspend power domain state
  * @dev_priv: i915 device instance
+ * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
  *
  * This function prepares the hardware power domain state before entering
- * system suspend. It must be paired with intel_power_domains_init_hw().
+ * system suspend.
+ *
+ * It must be called with power domains already disabled (after a call to
+ * intel_power_domains_disable()) and paired with intel_power_domains_resume().
  */
-void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
+void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
+                                enum i915_drm_suspend_mode suspend_mode)
 {
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+
+       intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+
+       /*
+        * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
+        * support, don't manually deinit the power domains. This also means the
+        * CSR/DMC firmware will stay active; it will power down any HW
+        * resources as required and also enable deeper system power states
+        * that would be blocked if the firmware were inactive.
+        */
+       if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
+           suspend_mode == I915_DRM_SUSPEND_IDLE &&
+           dev_priv->csr.dmc_payload != NULL) {
+               intel_power_domains_verify_state(dev_priv);
+               return;
+       }
+
        /*
         * Even if power well support was disabled we still want to disable
-        * power wells while we are system suspended.
+        * power wells if power domains must be deinitialized for suspend.
         */
-       if (!i915_modparams.disable_power_well)
+       if (!i915_modparams.disable_power_well) {
                intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+               intel_power_domains_verify_state(dev_priv);
+       }
 
        if (IS_ICELAKE(dev_priv))
                icl_display_core_uninit(dev_priv);
@@ -3596,8 +3877,36 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
                skl_display_core_uninit(dev_priv);
        else if (IS_GEN9_LP(dev_priv))
                bxt_display_core_uninit(dev_priv);
+
+       power_domains->display_core_suspended = true;
 }
 
+/**
+ * intel_power_domains_resume - resume power domain state
+ * @dev_priv: i915 device instance
+ *
+ * This function resumes the hardware power domain state during system resume.
+ *
+ * It will return with power domain support disabled (to be enabled later by
+ * intel_power_domains_enable()) and must be paired with
+ * intel_power_domains_suspend().
+ */
+void intel_power_domains_resume(struct drm_i915_private *dev_priv)
+{
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+
+       if (power_domains->display_core_suspended) {
+               intel_power_domains_init_hw(dev_priv, true);
+               power_domains->display_core_suspended = false;
+       } else {
+               intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+       }
+
+       intel_power_domains_verify_state(dev_priv);
+}
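The same pairing carries over to system suspend. A sketch of the expected sequence, assuming I915_DRM_SUSPEND_MEM exists per the (idle, mem, hibernation) list in the @suspend_mode kerneldoc:

/* Illustrative only: the real suspend/resume hooks live elsewhere in
 * the driver. I915_DRM_SUSPEND_MEM is assumed from the kerneldoc. */
static int example_pm_suspend(struct drm_i915_private *i915)
{
	intel_power_domains_disable(i915);
	/* ... quiesce GEM and display ... */
	intel_power_domains_suspend(i915, I915_DRM_SUSPEND_MEM);
	return 0;
}

static int example_pm_resume(struct drm_i915_private *i915)
{
	intel_power_domains_resume(i915);
	/* ... HW state readout retakes its references ... */
	intel_power_domains_enable(i915);
	return 0;
}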
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+
 static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
 {
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
@@ -3607,9 +3916,9 @@ static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
                enum intel_display_power_domain domain;
 
                DRM_DEBUG_DRIVER("%-25s %d\n",
-                                power_well->name, power_well->count);
+                                power_well->desc->name, power_well->count);
 
-               for_each_power_domain(domain, power_well->domains)
+               for_each_power_domain(domain, power_well->desc->domains)
                        DRM_DEBUG_DRIVER("  %-23s %d\n",
                                         intel_display_power_domain_str(domain),
                                         power_domains->domain_use_count[domain]);
@@ -3626,7 +3935,7 @@ static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
  * acquiring reference counts for any power wells in use and disabling the
  * ones left on by BIOS but not required by any active output.
  */
-void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
+static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
 {
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        struct i915_power_well *power_well;
@@ -3645,22 +3954,25 @@ void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
                 * and PW1 power wells) are under FW control, so ignore them,
                 * since their state can change asynchronously.
                 */
-               if (!power_well->domains)
+               if (!power_well->desc->domains)
                        continue;
 
-               enabled = power_well->ops->is_enabled(dev_priv, power_well);
-               if ((power_well->count || power_well->always_on) != enabled)
+               enabled = power_well->desc->ops->is_enabled(dev_priv,
+                                                           power_well);
+               if ((power_well->count || power_well->desc->always_on) !=
+                   enabled)
                        DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
-                                 power_well->name, power_well->count, enabled);
+                                 power_well->desc->name,
+                                 power_well->count, enabled);
 
                domains_count = 0;
-               for_each_power_domain(domain, power_well->domains)
+               for_each_power_domain(domain, power_well->desc->domains)
                        domains_count += power_domains->domain_use_count[domain];
 
                if (power_well->count != domains_count) {
                        DRM_ERROR("power well %s refcount/domain refcount mismatch "
                                  "(refcount %d/domains refcount %d)\n",
-                                 power_well->name, power_well->count,
+                                 power_well->desc->name, power_well->count,
                                  domains_count);
                        dump_domain_info = true;
                }
@@ -3678,6 +3990,14 @@ void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
        mutex_unlock(&power_domains->lock);
 }
 
+#else
+
+static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
+{
+}
+
+#endif
+
 /**
  * intel_runtime_pm_get - grab a runtime pm reference
  * @dev_priv: i915 device instance
@@ -3791,14 +4111,24 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
  * This function enables runtime pm at the end of the driver load sequence.
  *
  * Note that this function does not currently enable runtime pm for the
- * subordinate display power domains. That is only done on the first modeset
- * using intel_display_set_init_power().
+ * subordinate display power domains. That is done by
+ * intel_power_domains_enable().
  */
 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
 {
        struct pci_dev *pdev = dev_priv->drm.pdev;
        struct device *kdev = &pdev->dev;
 
+       /*
+        * Disable the system suspend direct complete optimization, which can
+        * leave the device suspended, skipping the driver's suspend handlers,
+        * if the device was already runtime suspended. This is needed due to
+        * the difference in our runtime and system suspend sequences and
+        * because the HDA driver may require us to enable the audio power
+        * domain during system suspend.
+        */
+       dev_pm_set_driver_flags(kdev, DPM_FLAG_NEVER_SKIP);
+
        pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
        pm_runtime_mark_last_busy(kdev);
 
@@ -3825,3 +4155,18 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
         */
        pm_runtime_put_autosuspend(kdev);
 }
+
+void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
+{
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+       struct device *kdev = &pdev->dev;
+
+       /* Transfer rpm ownership back to core */
+       WARN(pm_runtime_get_sync(&dev_priv->drm.pdev->dev) < 0,
+            "Failed to pass rpm ownership back to core\n");
+
+       pm_runtime_dont_use_autosuspend(kdev);
+
+       if (!HAS_RUNTIME_PM(dev_priv))
+               pm_runtime_put(kdev);
+}
index f7026e887fa9bd65d996c1afa851a92be28e6e6a..9600ccfc5b7699ff54b1d74be02afbff7e9cb46f 100644 (file)
@@ -83,6 +83,7 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
        bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
                intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI);
        DEFINE_WAIT(wait);
+       u32 psr_status;
 
        vblank_start = adjusted_mode->crtc_vblank_start;
        if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -104,8 +105,9 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
         * VBL interrupts will start the PSR exit and prevent a PSR
         * re-entry as well.
         */
-       if (intel_psr_wait_for_idle(new_crtc_state))
-               DRM_ERROR("PSR idle timed out, atomic update may fail\n");
+       if (intel_psr_wait_for_idle(new_crtc_state, &psr_status))
+               DRM_ERROR("PSR idle timed out 0x%x, atomic update may fail\n",
+                         psr_status);
 
        local_irq_disable();
 
@@ -957,10 +959,10 @@ g4x_plane_get_hw_state(struct intel_plane *plane,
 }
 
 static int
-intel_check_sprite_plane(struct intel_plane *plane,
-                        struct intel_crtc_state *crtc_state,
+intel_check_sprite_plane(struct intel_crtc_state *crtc_state,
                         struct intel_plane_state *state)
 {
+       struct intel_plane *plane = to_intel_plane(state->base.plane);
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_framebuffer *fb = state->base.fb;
@@ -1407,8 +1409,7 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
        case DRM_FORMAT_XBGR8888:
        case DRM_FORMAT_ARGB8888:
        case DRM_FORMAT_ABGR8888:
-               if (modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
-                   modifier == I915_FORMAT_MOD_Y_TILED_CCS)
+               if (is_ccs_modifier(modifier))
                        return true;
                /* fall through */
        case DRM_FORMAT_RGB565:
index 6e8e0b54674310d388e01476468c98d9c7b1173e..fd496416087c491bfaeffd94caa131707cd28f07 100644 (file)
@@ -222,7 +222,7 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
                goto fail;
        }
 
-       ggtt_pin_bias = to_i915(uc_fw->obj->base.dev)->guc.ggtt_pin_bias;
+       ggtt_pin_bias = to_i915(uc_fw->obj->base.dev)->ggtt.pin_bias;
        vma = i915_gem_object_ggtt_pin(uc_fw->obj, NULL, 0, 0,
                                       PIN_OFFSET_BIAS | ggtt_pin_bias);
        if (IS_ERR(vma)) {
index 50b39aa4ffb88ac28d478124bf2e67a54e028167..3ad302c66254bb0b74703fa44df4e305a5013a33 100644 (file)
@@ -283,14 +283,24 @@ fw_domains_reset(struct drm_i915_private *i915,
                fw_domain_reset(i915, d);
 }
 
+static inline u32 gt_thread_status(struct drm_i915_private *dev_priv)
+{
+       u32 val;
+
+       val = __raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG);
+       val &= GEN6_GT_THREAD_STATUS_CORE_MASK;
+
+       return val;
+}
+
 static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
 {
-       /* w/a for a sporadic read returning 0 by waiting for the GT
+       /*
+        * w/a for a sporadic read returning 0 by waiting for the GT
         * thread to wake up.
         */
-       if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
-                               GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
-               DRM_ERROR("GT thread status wait timed out\n");
+       WARN_ONCE(wait_for_atomic_us(gt_thread_status(dev_priv) == 0, 5000),
+                 "GT thread status wait timed out\n");
 }
 
 static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
@@ -1729,7 +1739,7 @@ static void gen3_stop_engine(struct intel_engine_cs *engine)
 }
 
 static void i915_stop_engines(struct drm_i915_private *dev_priv,
-                             unsigned engine_mask)
+                             unsigned int engine_mask)
 {
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
@@ -1749,7 +1759,9 @@ static bool i915_in_reset(struct pci_dev *pdev)
        return gdrst & GRDOM_RESET_STATUS;
 }
 
-static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
+static int i915_do_reset(struct drm_i915_private *dev_priv,
+                        unsigned int engine_mask,
+                        unsigned int retry)
 {
        struct pci_dev *pdev = dev_priv->drm.pdev;
        int err;
@@ -1776,7 +1788,9 @@ static bool g4x_reset_complete(struct pci_dev *pdev)
        return (gdrst & GRDOM_RESET_ENABLE) == 0;
 }
 
-static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
+static int g33_do_reset(struct drm_i915_private *dev_priv,
+                       unsigned int engine_mask,
+                       unsigned int retry)
 {
        struct pci_dev *pdev = dev_priv->drm.pdev;
 
@@ -1784,7 +1798,9 @@ static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
        return wait_for(g4x_reset_complete(pdev), 500);
 }
 
-static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
+static int g4x_do_reset(struct drm_i915_private *dev_priv,
+                       unsigned int engine_mask,
+                       unsigned int retry)
 {
        struct pci_dev *pdev = dev_priv->drm.pdev;
        int ret;
@@ -1821,7 +1837,8 @@ out:
 }
 
 static int ironlake_do_reset(struct drm_i915_private *dev_priv,
-                            unsigned engine_mask)
+                            unsigned int engine_mask,
+                            unsigned int retry)
 {
        int ret;
 
@@ -1877,6 +1894,7 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
  * gen6_reset_engines - reset individual engines
  * @dev_priv: i915 device
  * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
+ * @retry: the count of previous attempts to reset.
  *
  * This function will reset the individual engines that are set in engine_mask.
  * If you provide ALL_ENGINES as mask, full global domain reset will be issued.
@@ -1887,7 +1905,8 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
  * Returns 0 on success, nonzero on error.
  */
 static int gen6_reset_engines(struct drm_i915_private *dev_priv,
-                             unsigned engine_mask)
+                             unsigned int engine_mask,
+                             unsigned int retry)
 {
        struct intel_engine_cs *engine;
        const u32 hw_engine_mask[I915_NUM_ENGINES] = {
@@ -1926,7 +1945,7 @@ static int gen6_reset_engines(struct drm_i915_private *dev_priv,
  * Returns 0 on success, nonzero on error.
  */
 static int gen11_reset_engines(struct drm_i915_private *dev_priv,
-                              unsigned engine_mask)
+                              unsigned int engine_mask)
 {
        struct intel_engine_cs *engine;
        const u32 hw_engine_mask[I915_NUM_ENGINES] = {
@@ -2066,7 +2085,7 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv,
        return ret;
 }
 
-static int gen8_reset_engine_start(struct intel_engine_cs *engine)
+static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
        int ret;
@@ -2086,7 +2105,7 @@ static int gen8_reset_engine_start(struct intel_engine_cs *engine)
        return ret;
 }
 
-static void gen8_reset_engine_cancel(struct intel_engine_cs *engine)
+static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
 
@@ -2094,33 +2113,56 @@ static void gen8_reset_engine_cancel(struct intel_engine_cs *engine)
                      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
 }
 
+static int reset_engines(struct drm_i915_private *i915,
+                        unsigned int engine_mask,
+                        unsigned int retry)
+{
+       if (INTEL_GEN(i915) >= 11)
+               return gen11_reset_engines(i915, engine_mask);
+       else
+               return gen6_reset_engines(i915, engine_mask, retry);
+}
+
 static int gen8_reset_engines(struct drm_i915_private *dev_priv,
-                             unsigned engine_mask)
+                             unsigned int engine_mask,
+                             unsigned int retry)
 {
        struct intel_engine_cs *engine;
+       const bool reset_non_ready = retry >= 1;
        unsigned int tmp;
        int ret;
 
        for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
-               if (gen8_reset_engine_start(engine)) {
-                       ret = -EIO;
-                       goto not_ready;
-               }
+               ret = gen8_engine_reset_prepare(engine);
+               if (ret && !reset_non_ready)
+                       goto skip_reset;
+
+               /*
+                * If this is not the first attempt to prepare a reset,
+                * proceed anyway even if some engines are not ready.
+                *
+                * By doing so we risk context corruption and, on some
+                * gens (KBL), a possible system hang if the reset
+                * happens during active bb execution.
+                *
+                * We would rather take context corruption than a failed
+                * reset with a wedged driver/GPU, and the active bb
+                * execution case should be covered by the
+                * i915_stop_engines() call before the reset.
+                */
        }
 
-       if (INTEL_GEN(dev_priv) >= 11)
-               ret = gen11_reset_engines(dev_priv, engine_mask);
-       else
-               ret = gen6_reset_engines(dev_priv, engine_mask);
+       ret = reset_engines(dev_priv, engine_mask, retry);
 
-not_ready:
+skip_reset:
        for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
-               gen8_reset_engine_cancel(engine);
+               gen8_engine_reset_cancel(engine);
 
        return ret;
 }
 
-typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);
+typedef int (*reset_func)(struct drm_i915_private *,
+                         unsigned int engine_mask, unsigned int retry);
 
 static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
 {
@@ -2143,12 +2185,15 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
                return NULL;
 }
 
-int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
+int intel_gpu_reset(struct drm_i915_private *dev_priv,
+                   const unsigned int engine_mask)
 {
        reset_func reset = intel_get_gpu_reset(dev_priv);
-       int retry;
+       unsigned int retry;
        int ret;
 
+       GEM_BUG_ON(!engine_mask);
+
        /*
         * We want to perform per-engine reset from atomic context (e.g.
         * softirq), which imposes the constraint that we cannot sleep.
@@ -2190,8 +2235,9 @@ int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
 
                ret = -ENODEV;
                if (reset) {
-                       GEM_TRACE("engine_mask=%x\n", engine_mask);
-                       ret = reset(dev_priv, engine_mask);
+                       ret = reset(dev_priv, engine_mask, retry);
+                       GEM_TRACE("engine_mask=%x, ret=%d, retry=%d\n",
+                                 engine_mask, ret, retry);
                }
                if (ret != -ETIMEDOUT || engine_mask != ALL_ENGINES)
                        break;
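For context, the retry plumbing above means a timed-out full-device reset is retried, and on those retries gen8_reset_engines() proceeds past engines that fail gen8_engine_reset_prepare(). Illustrative callers under those assumptions (the real reset entry points sit higher up in the reset path; mapping an engine to BIT(engine->id) mirrors the per-engine masks used by gen6_reset_engines() above):

static int example_full_reset(struct drm_i915_private *dev_priv)
{
	/* Only a full-device reset (ALL_ENGINES) is retried on -ETIMEDOUT,
	 * with engines stopped before each attempt. */
	return intel_gpu_reset(dev_priv, ALL_ENGINES);
}

static int example_engine_reset(struct intel_engine_cs *engine)
{
	/* Per-engine reset is usable from atomic context and not retried. */
	return intel_gpu_reset(engine->i915, BIT(engine->id));
}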
@@ -2237,20 +2283,28 @@ bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
 bool
 intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
 {
-       if (unlikely(i915_modparams.mmio_debug ||
-                    dev_priv->uncore.unclaimed_mmio_check <= 0))
-               return false;
+       bool ret = false;
+
+       spin_lock_irq(&dev_priv->uncore.lock);
+
+       if (unlikely(dev_priv->uncore.unclaimed_mmio_check <= 0))
+               goto out;
 
        if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
-               DRM_DEBUG("Unclaimed register detected, "
-                         "enabling oneshot unclaimed register reporting. "
-                         "Please use i915.mmio_debug=N for more information.\n");
-               i915_modparams.mmio_debug++;
+               if (!i915_modparams.mmio_debug) {
+                       DRM_DEBUG("Unclaimed register detected, "
+                                 "enabling oneshot unclaimed register reporting. "
+                                 "Please use i915.mmio_debug=N for more information.\n");
+                       i915_modparams.mmio_debug++;
+               }
                dev_priv->uncore.unclaimed_mmio_check--;
-               return true;
+               ret = true;
        }
 
-       return false;
+out:
+       spin_unlock_irq(&dev_priv->uncore.lock);
+
+       return ret;
 }
 
 static enum forcewake_domains
index 74bf76f3fddc4091d26c9a7227ca689ac7975c19..92cb82dd0c0731fdb09c3d51a66b089768b1c7b0 100644 (file)
@@ -163,8 +163,14 @@ int intel_wopcm_init(struct intel_wopcm *wopcm)
        u32 guc_wopcm_rsvd;
        int err;
 
+       if (!USES_GUC(dev_priv))
+               return 0;
+
        GEM_BUG_ON(!wopcm->size);
 
+       if (i915_inject_load_failure())
+               return -E2BIG;
+
        if (guc_fw_size >= wopcm->size) {
                DRM_ERROR("GuC FW (%uKiB) is too big to fit in WOPCM.",
                          guc_fw_size / 1024);
index 7efb326badcd677e98ddfe5c6d1e869d201492ba..e272127783fe8abaa0a1fce03ef28cce32beada5 100644 (file)
@@ -906,7 +906,11 @@ gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
        if (IS_ERR(obj))
                return ERR_CAST(obj);
 
-       cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+       err = i915_gem_object_set_to_wc_domain(obj, true);
+       if (err)
+               goto err;
+
+       cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
        if (IS_ERR(cmd)) {
                err = PTR_ERR(cmd);
                goto err;
@@ -936,13 +940,10 @@ gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
        }
 
        *cmd = MI_BATCH_BUFFER_END;
+       i915_gem_chipset_flush(i915);
 
        i915_gem_object_unpin_map(obj);
 
-       err = i915_gem_object_set_to_gtt_domain(obj, false);
-       if (err)
-               goto err;
-
        batch = i915_vma_instance(obj, vma->vm, NULL);
        if (IS_ERR(batch)) {
                err = PTR_ERR(batch);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
new file mode 100644 (file)
index 0000000..d0aa19d
--- /dev/null
@@ -0,0 +1,221 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/random.h>
+
+#include "../i915_selftest.h"
+
+#include "mock_context.h"
+#include "igt_flush_test.h"
+
+static int switch_to_context(struct drm_i915_private *i915,
+                            struct i915_gem_context *ctx)
+{
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+       int err = 0;
+
+       intel_runtime_pm_get(i915);
+
+       for_each_engine(engine, i915, id) {
+               struct i915_request *rq;
+
+               rq = i915_request_alloc(engine, ctx);
+               if (IS_ERR(rq)) {
+                       err = PTR_ERR(rq);
+                       break;
+               }
+
+               i915_request_add(rq);
+       }
+
+       intel_runtime_pm_put(i915);
+
+       return err;
+}
+
+static void trash_stolen(struct drm_i915_private *i915)
+{
+       struct i915_ggtt *ggtt = &i915->ggtt;
+       const u64 slot = ggtt->error_capture.start;
+       const resource_size_t size = resource_size(&i915->dsm);
+       unsigned long page;
+       u32 prng = 0x12345678;
+
+       for (page = 0; page < size; page += PAGE_SIZE) {
+               const dma_addr_t dma = i915->dsm.start + page;
+               u32 __iomem *s;
+               int x;
+
+               ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
+
+               s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
+               for (x = 0; x < PAGE_SIZE / sizeof(u32); x++) {
+                       prng = next_pseudo_random32(prng);
+                       iowrite32(prng, &s[x]);
+               }
+               io_mapping_unmap_atomic(s);
+       }
+
+       ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
+}
+
+static void simulate_hibernate(struct drm_i915_private *i915)
+{
+       intel_runtime_pm_get(i915);
+
+       /*
+        * As a final sting in the tail, invalidate stolen. Under a real S4,
+        * stolen is lost and needs to be refilled on resume. However, under
+        * CI we merely do S4-device testing (as full S4 is too unreliable
+        * for automated testing across a cluster), so to simulate the effect
+        * of stolen being trashed across S4, we trash it ourselves.
+        */
+       trash_stolen(i915);
+
+       intel_runtime_pm_put(i915);
+}
+
+static int pm_prepare(struct drm_i915_private *i915)
+{
+       int err = 0;
+
+       if (i915_gem_suspend(i915)) {
+               pr_err("i915_gem_suspend failed\n");
+               err = -EINVAL;
+       }
+
+       return err;
+}
+
+static void pm_suspend(struct drm_i915_private *i915)
+{
+       intel_runtime_pm_get(i915);
+
+       i915_gem_suspend_gtt_mappings(i915);
+       i915_gem_suspend_late(i915);
+
+       intel_runtime_pm_put(i915);
+}
+
+static void pm_hibernate(struct drm_i915_private *i915)
+{
+       intel_runtime_pm_get(i915);
+
+       i915_gem_suspend_gtt_mappings(i915);
+
+       i915_gem_freeze(i915);
+       i915_gem_freeze_late(i915);
+
+       intel_runtime_pm_put(i915);
+}
+
+static void pm_resume(struct drm_i915_private *i915)
+{
+       /*
+        * Both suspend and hibernate follow the same wakeup path and assume
+        * that runtime-pm just works.
+        */
+       intel_runtime_pm_get(i915);
+
+       intel_engines_sanitize(i915);
+       i915_gem_sanitize(i915);
+       i915_gem_resume(i915);
+
+       intel_runtime_pm_put(i915);
+}
+
+static int igt_gem_suspend(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       struct i915_gem_context *ctx;
+       struct drm_file *file;
+       int err;
+
+       file = mock_file(i915);
+       if (IS_ERR(file))
+               return PTR_ERR(file);
+
+       err = -ENOMEM;
+       mutex_lock(&i915->drm.struct_mutex);
+       ctx = live_context(i915, file);
+       if (!IS_ERR(ctx))
+               err = switch_to_context(i915, ctx);
+       mutex_unlock(&i915->drm.struct_mutex);
+       if (err)
+               goto out;
+
+       err = pm_prepare(i915);
+       if (err)
+               goto out;
+
+       pm_suspend(i915);
+
+       /* Here be dragons! Note that with S3RST any S3 may become S4! */
+       simulate_hibernate(i915);
+
+       pm_resume(i915);
+
+       mutex_lock(&i915->drm.struct_mutex);
+       err = switch_to_context(i915, ctx);
+       if (igt_flush_test(i915, I915_WAIT_LOCKED))
+               err = -EIO;
+       mutex_unlock(&i915->drm.struct_mutex);
+out:
+       mock_file_free(i915, file);
+       return err;
+}
+
+static int igt_gem_hibernate(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       struct i915_gem_context *ctx;
+       struct drm_file *file;
+       int err;
+
+       file = mock_file(i915);
+       if (IS_ERR(file))
+               return PTR_ERR(file);
+
+       err = -ENOMEM;
+       mutex_lock(&i915->drm.struct_mutex);
+       ctx = live_context(i915, file);
+       if (!IS_ERR(ctx))
+               err = switch_to_context(i915, ctx);
+       mutex_unlock(&i915->drm.struct_mutex);
+       if (err)
+               goto out;
+
+       err = pm_prepare(i915);
+       if (err)
+               goto out;
+
+       pm_hibernate(i915);
+
+       /* Here be dragons! */
+       simulate_hibernate(i915);
+
+       pm_resume(i915);
+
+       mutex_lock(&i915->drm.struct_mutex);
+       err = switch_to_context(i915, ctx);
+       if (igt_flush_test(i915, I915_WAIT_LOCKED))
+               err = -EIO;
+       mutex_unlock(&i915->drm.struct_mutex);
+out:
+       mock_file_free(i915, file);
+       return err;
+}
+
+int i915_gem_live_selftests(struct drm_i915_private *i915)
+{
+       static const struct i915_subtest tests[] = {
+               SUBTEST(igt_gem_suspend),
+               SUBTEST(igt_gem_hibernate),
+       };
+
+       return i915_subtests(tests, i915);
+}
index 3a095c37c1203ba7a5e2a72bd29d5d40395d9039..4e6a221063acf6eda4ba95a8043036f98116b947 100644 (file)
@@ -33,7 +33,8 @@ static int cpu_set(struct drm_i915_gem_object *obj,
 {
        unsigned int needs_clflush;
        struct page *page;
-       u32 *map;
+       void *map;
+       u32 *cpu;
        int err;
 
        err = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
@@ -42,24 +43,19 @@ static int cpu_set(struct drm_i915_gem_object *obj,
 
        page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
        map = kmap_atomic(page);
+       cpu = map + offset_in_page(offset);
 
-       if (needs_clflush & CLFLUSH_BEFORE) {
-               mb();
-               clflush(map+offset_in_page(offset) / sizeof(*map));
-               mb();
-       }
+       if (needs_clflush & CLFLUSH_BEFORE)
+               drm_clflush_virt_range(cpu, sizeof(*cpu));
 
-       map[offset_in_page(offset) / sizeof(*map)] = v;
+       *cpu = v;
 
-       if (needs_clflush & CLFLUSH_AFTER) {
-               mb();
-               clflush(map+offset_in_page(offset) / sizeof(*map));
-               mb();
-       }
+       if (needs_clflush & CLFLUSH_AFTER)
+               drm_clflush_virt_range(cpu, sizeof(*cpu));
 
        kunmap_atomic(map);
-
        i915_gem_obj_finish_shmem_access(obj);
+
        return 0;
 }
 
@@ -69,7 +65,8 @@ static int cpu_get(struct drm_i915_gem_object *obj,
 {
        unsigned int needs_clflush;
        struct page *page;
-       u32 *map;
+       void *map;
+       u32 *cpu;
        int err;
 
        err = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
@@ -78,17 +75,16 @@ static int cpu_get(struct drm_i915_gem_object *obj,
 
        page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
        map = kmap_atomic(page);
+       cpu = map + offset_in_page(offset);
 
-       if (needs_clflush & CLFLUSH_BEFORE) {
-               mb();
-               clflush(map+offset_in_page(offset) / sizeof(*map));
-               mb();
-       }
+       if (needs_clflush & CLFLUSH_BEFORE)
+               drm_clflush_virt_range(cpu, sizeof(*cpu));
 
-       *v = map[offset_in_page(offset) / sizeof(*map)];
-       kunmap_atomic(map);
+       *v = *cpu;
 
+       kunmap_atomic(map);
        i915_gem_obj_finish_shmem_access(obj);
+
        return 0;
 }
 
index ba4f322d56b8c71a43419a096a06637f12c95c35..6d3516d5bff9e5154d83da66033950e132acc7b9 100644 (file)
@@ -282,7 +282,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
                               view.partial.offset,
                               view.partial.size,
                               vma->size >> PAGE_SHIFT,
-                              tile_row_pages(obj),
+                              tile->tiling ? tile_row_pages(obj) : 0,
                               vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
                               offset >> PAGE_SHIFT,
                               (unsigned int)offset_in_page(offset),
index a00e2bd08bce1aba6babae931e64b91c5506387e..a15713cae3b3a9875a76a130599dafb1d4205ace 100644 (file)
@@ -17,6 +17,7 @@ selftest(objects, i915_gem_object_live_selftests)
 selftest(dmabuf, i915_gem_dmabuf_live_selftests)
 selftest(coherency, i915_gem_coherency_live_selftests)
 selftest(gtt, i915_gem_gtt_live_selftests)
+selftest(gem, i915_gem_live_selftests)
 selftest(evict, i915_gem_evict_live_selftests)
 selftest(hugepages, i915_gem_huge_page_live_selftests)
 selftest(contexts, i915_gem_context_live_selftests)
index 407c98fb917057dcb3026b1fb11bd6d2cc82a13b..90ba88c972cfebe24818d8dc8bbc3a02300f3031 100644 (file)
@@ -65,6 +65,40 @@ static int check_all_doorbells(struct intel_guc *guc)
        return 0;
 }
 
+static int ring_doorbell_nop(struct intel_guc_client *client)
+{
+       struct guc_process_desc *desc = __get_process_desc(client);
+       int err;
+
+       client->use_nop_wqi = true;
+
+       spin_lock_irq(&client->wq_lock);
+
+       guc_wq_item_append(client, 0, 0, 0, 0);
+       guc_ring_doorbell(client);
+
+       spin_unlock_irq(&client->wq_lock);
+
+       client->use_nop_wqi = false;
+
+       /*
+        * If there are no issues, the GuC will update the WQ head and
+        * keep the WQ in active status.
+        */
+       err = wait_for(READ_ONCE(desc->head) == READ_ONCE(desc->tail), 10);
+       if (err) {
+               pr_err("doorbell %u ring failed!\n", client->doorbell_id);
+               return -EIO;
+       }
+
+       if (desc->wq_status != WQ_STATUS_ACTIVE) {
+               pr_err("doorbell %u ring put WQ in bad state (%u)!\n",
+                      client->doorbell_id, desc->wq_status);
+               return -EIO;
+       }
+
+       return 0;
+}
+
 /*
  * Basic client sanity check, handy to validate create_clients.
  */
@@ -332,6 +366,10 @@ static int igt_guc_doorbells(void *arg)
                err = check_all_doorbells(guc);
                if (err)
                        goto out;
+
+               err = ring_doorbell_nop(clients[i]);
+               if (err)
+                       goto out;
        }
 
 out:
index 65d66cdedd26c6027be1881f380e27578d07518a..db378226ac105e4df702bccfd795097cdd695180 100644 (file)
@@ -1018,8 +1018,41 @@ static int evict_vma(void *data)
        return err;
 }
 
+static int evict_fence(void *data)
+{
+       struct evict_vma *arg = data;
+       struct drm_i915_private *i915 = arg->vma->vm->i915;
+       int err;
+
+       complete(&arg->completion);
+
+       mutex_lock(&i915->drm.struct_mutex);
+
+       /* Mark the fence register as dirty to force the mmio update. */
+       err = i915_gem_object_set_tiling(arg->vma->obj, I915_TILING_Y, 512);
+       if (err) {
+               pr_err("Invalid Y-tiling settings; err:%d\n", err);
+               goto out_unlock;
+       }
+
+       err = i915_vma_pin_fence(arg->vma);
+       if (err) {
+               pr_err("Unable to pin Y-tiled fence; err:%d\n", err);
+               goto out_unlock;
+       }
+
+       i915_vma_unpin_fence(arg->vma);
+
+out_unlock:
+       mutex_unlock(&i915->drm.struct_mutex);
+
+       return err;
+}
+
 static int __igt_reset_evict_vma(struct drm_i915_private *i915,
-                                struct i915_address_space *vm)
+                                struct i915_address_space *vm,
+                                int (*fn)(void *),
+                                unsigned int flags)
 {
        struct drm_i915_gem_object *obj;
        struct task_struct *tsk = NULL;
@@ -1040,12 +1073,20 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
        if (err)
                goto unlock;
 
-       obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+       obj = i915_gem_object_create_internal(i915, SZ_1M);
        if (IS_ERR(obj)) {
                err = PTR_ERR(obj);
                goto fini;
        }
 
+       if (flags & EXEC_OBJECT_NEEDS_FENCE) {
+               err = i915_gem_object_set_tiling(obj, I915_TILING_X, 512);
+               if (err) {
+                       pr_err("Invalid X-tiling settings; err:%d\n", err);
+                       goto out_obj;
+               }
+       }
+
        arg.vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(arg.vma)) {
                err = PTR_ERR(arg.vma);
@@ -1059,11 +1100,28 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
        }
 
        err = i915_vma_pin(arg.vma, 0, 0,
-                          i915_vma_is_ggtt(arg.vma) ? PIN_GLOBAL : PIN_USER);
-       if (err)
+                          i915_vma_is_ggtt(arg.vma) ?
+                          PIN_GLOBAL | PIN_MAPPABLE :
+                          PIN_USER);
+       if (err) {
+               i915_request_add(rq);
                goto out_obj;
+       }
+
+       if (flags & EXEC_OBJECT_NEEDS_FENCE) {
+               err = i915_vma_pin_fence(arg.vma);
+               if (err) {
+                       pr_err("Unable to pin X-tiled fence; err:%d\n", err);
+                       i915_vma_unpin(arg.vma);
+                       i915_request_add(rq);
+                       goto out_obj;
+               }
+       }
+
+       err = i915_vma_move_to_active(arg.vma, rq, flags);
 
-       err = i915_vma_move_to_active(arg.vma, rq, EXEC_OBJECT_WRITE);
+       if (flags & EXEC_OBJECT_NEEDS_FENCE)
+               i915_vma_unpin_fence(arg.vma);
        i915_vma_unpin(arg.vma);
 
        i915_request_get(rq);
@@ -1086,7 +1144,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
 
        init_completion(&arg.completion);
 
-       tsk = kthread_run(evict_vma, &arg, "igt/evict_vma");
+       tsk = kthread_run(fn, &arg, "igt/evict_vma");
        if (IS_ERR(tsk)) {
                err = PTR_ERR(tsk);
                tsk = NULL;
@@ -1137,29 +1195,47 @@ static int igt_reset_evict_ggtt(void *arg)
 {
        struct drm_i915_private *i915 = arg;
 
-       return __igt_reset_evict_vma(i915, &i915->ggtt.vm);
+       return __igt_reset_evict_vma(i915, &i915->ggtt.vm,
+                                    evict_vma, EXEC_OBJECT_WRITE);
 }
 
 static int igt_reset_evict_ppgtt(void *arg)
 {
        struct drm_i915_private *i915 = arg;
        struct i915_gem_context *ctx;
+       struct drm_file *file;
        int err;
 
+       file = mock_file(i915);
+       if (IS_ERR(file))
+               return PTR_ERR(file);
+
        mutex_lock(&i915->drm.struct_mutex);
-       ctx = kernel_context(i915);
+       ctx = live_context(i915, file);
        mutex_unlock(&i915->drm.struct_mutex);
-       if (IS_ERR(ctx))
-               return PTR_ERR(ctx);
+       if (IS_ERR(ctx)) {
+               err = PTR_ERR(ctx);
+               goto out;
+       }
 
        err = 0;
        if (ctx->ppgtt) /* aliasing == global gtt locking, covered above */
-               err = __igt_reset_evict_vma(i915, &ctx->ppgtt->vm);
+               err = __igt_reset_evict_vma(i915, &ctx->ppgtt->vm,
+                                           evict_vma, EXEC_OBJECT_WRITE);
 
-       kernel_context_close(ctx);
+out:
+       mock_file_free(i915, file);
        return err;
 }
 
+static int igt_reset_evict_fence(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+
+       return __igt_reset_evict_vma(i915, &i915->ggtt.vm,
+                                    evict_fence, EXEC_OBJECT_NEEDS_FENCE);
+}
+
 static int wait_for_others(struct drm_i915_private *i915,
                           struct intel_engine_cs *exclude)
 {
@@ -1409,6 +1485,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
                SUBTEST(igt_reset_wait),
                SUBTEST(igt_reset_evict_ggtt),
                SUBTEST(igt_reset_evict_ppgtt),
+               SUBTEST(igt_reset_evict_fence),
                SUBTEST(igt_handle_error),
        };
        bool saved_hangcheck;
index 8904f1ce64e3d69bfa2745faf06d7caf47352dea..d937bdff26f99cec944a4550f02fddd3ad482772 100644 (file)
@@ -43,6 +43,7 @@ mock_context(struct drm_i915_private *i915,
 
        INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
        INIT_LIST_HEAD(&ctx->handles_list);
+       INIT_LIST_HEAD(&ctx->hw_id_link);
 
        for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
                struct intel_context *ce = &ctx->__engine[n];
@@ -50,11 +51,9 @@ mock_context(struct drm_i915_private *i915,
                ce->gem_context = ctx;
        }
 
-       ret = ida_simple_get(&i915->contexts.hw_ida,
-                            0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
+       ret = i915_gem_context_pin_hw_id(ctx);
        if (ret < 0)
                goto err_handles;
-       ctx->hw_id = ret;
 
        if (name) {
                ctx->name = kstrdup(name, GFP_KERNEL);
@@ -85,11 +84,7 @@ void mock_context_close(struct i915_gem_context *ctx)
 
 void mock_init_contexts(struct drm_i915_private *i915)
 {
-       INIT_LIST_HEAD(&i915->contexts.list);
-       ida_init(&i915->contexts.hw_ida);
-
-       INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
-       init_llist_head(&i915->contexts.free_list);
+       init_contexts(i915);
 }
 
 struct i915_gem_context *
index a140ea5c3a7c50edca13d1ad0c81955e1dea76e8..6ae418c76015b2187dd31437f3a460868765c5d5 100644 (file)
@@ -118,6 +118,8 @@ void mock_init_ggtt(struct drm_i915_private *i915)
        ggtt->vm.vma_ops.clear_pages = clear_pages;
 
        i915_address_space_init(&ggtt->vm, i915);
+
+       ggtt->vm.is_ggtt = true;
 }
 
 void mock_fini_ggtt(struct drm_i915_private *i915)
index fbf5cfc9b352f7a005071909479624e46253f516..fd965ffbb92e33fcef415033b1a78a09849bda05 100644 (file)
        INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \
        INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \
        INTEL_VGA_DEVICE(0x3E96, info), /* SRV GT2 */ \
+       INTEL_VGA_DEVICE(0x3E98, info), /* SRV GT2 */ \
        INTEL_VGA_DEVICE(0x3E9A, info)  /* SRV GT2 */
 
 /* CFL H */
index 7f5634ce8e885d0aa26ee210592b5b93fb9f01e5..a4446f452040aa2bdb15dfd8c28c320b073f9bf0 100644 (file)
@@ -529,6 +529,28 @@ typedef struct drm_i915_irq_wait {
  */
 #define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51
 
+/*
+ * Once upon a time we supposed that writes through the GGTT would be
+ * immediately in physical memory (once flushed out of the CPU path). However,
+ * on a few different processors and chipsets, this is not necessarily the case
+ * as the writes appear to be buffered internally. Thus a read of the backing
+ * storage (physical memory) via a different path (with different physical tags
+ * to the indirect write via the GGTT) will see stale values from before
+ * the GGTT write. Inside the kernel, we can for the most part keep track of
+ * the different read/write domains in use (e.g. set-domain), but the assumption
+ * of coherency is baked into the ABI, hence reporting its true state in this
+ * parameter.
+ *
+ * Reports true when writes via mmap_gtt are immediately visible following an
+ * lfence to flush the WCB.
+ *
+ * Reports false when writes via mmap_gtt are indeterminately delayed in an
+ * internal buffer and are _not_ immediately visible to third parties accessing
+ * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC
+ * communications channel when this reports false is strongly discouraged.
+ */
+#define I915_PARAM_MMAP_GTT_COHERENT   52
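A minimal userspace sketch of querying the new parameter, assuming an already-open DRM fd and the usual uapi headers; everything outside the GETPARAM call itself is illustrative:

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int ggtt_writes_coherent(int drm_fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_MMAP_GTT_COHERENT,
		.value = &value,
	};

	/* Older kernels reject unknown parameters, so treat a failed
	 * ioctl as "unknown" rather than "non-coherent". */
	if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -1;

	return value;	/* 1: GGTT writes visible once the WCB is flushed */
}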
+
 typedef struct drm_i915_getparam {
        __s32 param;
        /*