Merge branch 'drm-next-5.1' of git://people.freedesktop.org/~agd5f/linux into drm...
author    Dave Airlie <airlied@redhat.com>
          Mon, 11 Feb 2019 04:04:05 +0000 (14:04 +1000)
committer Dave Airlie <airlied@redhat.com>
          Mon, 11 Feb 2019 04:04:20 +0000 (14:04 +1000)
Updates for 5.1:
- GDS fixes
- Add AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES interface
- GPUVM fixes
- PCIE DPM switching fixes for vega20
- Vega10 uclk DPM regression fix
- DC Freesync fixes
- DC ABM fixes
- Various DC cleanups

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190208210214.27666-1-alexander.deucher@amd.com
632 files changed:
Documentation/devicetree/bindings/display/arm,komeda.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/bridge/lvds-transmitter.txt
Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt
Documentation/devicetree/bindings/display/bridge/thine,thc63lvdm83d.txt
Documentation/devicetree/bindings/display/bridge/ti,ds90c185.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/auo,g101evn010.txt [moved from Documentation/devicetree/bindings/display/panel/auo,g101evn010 with 100% similarity]
Documentation/devicetree/bindings/display/panel/lemaker,bl035-rgb-002.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/pda,91-00156-a0.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/sitronix,st7701.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/renesas,du.txt
Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt
Documentation/devicetree/bindings/gpu/samsung-rotator.txt
Documentation/devicetree/bindings/vendor-prefixes.txt
Documentation/gpu/afbc.rst [new file with mode: 0644]
Documentation/gpu/dp-mst/topology-figure-1.dot [new file with mode: 0644]
Documentation/gpu/dp-mst/topology-figure-2.dot [new file with mode: 0644]
Documentation/gpu/dp-mst/topology-figure-3.dot [new file with mode: 0644]
Documentation/gpu/drivers.rst
Documentation/gpu/drm-internals.rst
Documentation/gpu/drm-kms-helpers.rst
Documentation/gpu/drm-kms.rst
Documentation/gpu/komeda-kms.rst [new file with mode: 0644]
Documentation/gpu/todo.rst
MAINTAINERS
drivers/acpi/pmic/intel_pmic.c
drivers/acpi/pmic/intel_pmic.h
drivers/acpi/pmic/intel_pmic_chtwc.c
drivers/acpi/pmic/intel_pmic_xpower.c
drivers/gpu/drm/Kconfig
drivers/gpu/drm/Makefile
drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
drivers/gpu/drm/amd/amdgpu/atom.c
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
drivers/gpu/drm/arc/arcpgu_crtc.c
drivers/gpu/drm/arc/arcpgu_drv.c
drivers/gpu/drm/arc/arcpgu_sim.c
drivers/gpu/drm/arm/Kconfig
drivers/gpu/drm/arm/Makefile
drivers/gpu/drm/arm/display/Kbuild [new file with mode: 0644]
drivers/gpu/drm/arm/display/Kconfig [new file with mode: 0644]
drivers/gpu/drm/arm/display/include/malidp_io.h [new file with mode: 0644]
drivers/gpu/drm/arm/display/include/malidp_product.h [new file with mode: 0644]
drivers/gpu/drm/arm/display/include/malidp_utils.h [new file with mode: 0644]
drivers/gpu/drm/arm/display/komeda/Makefile [new file with mode: 0644]
drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c [new file with mode: 0644]
drivers/gpu/drm/arm/display/komeda/komeda_crtc.c [new file with mode: 0644]
drivers/gpu/drm/arm/display/komeda/komeda_dev.c [new file with mode: 0644]
drivers/gpu/drm/arm/display/komeda/komeda_dev.h [new file with mode: 0644]
drivers/gpu/drm/arm/display/komeda/komeda_drv.c [new file with mode: 0644]
drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c [new file with mode: 0644]
drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h [new file with mode: 0644]
drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c [new file with mode: 0644]
drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h [new file with mode: 0644]
drivers/gpu/drm/arm/display/komeda/komeda_kms.c [new file with mode: 0644]
drivers/gpu/drm/arm/display/komeda/komeda_kms.h [new file with mode: 0644]
drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c [new file with mode: 0644]
drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h [new file with mode: 0644]
drivers/gpu/drm/arm/display/komeda/komeda_plane.c [new file with mode: 0644]
drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c [new file with mode: 0644]
drivers/gpu/drm/arm/hdlcd_crtc.c
drivers/gpu/drm/arm/hdlcd_drv.c
drivers/gpu/drm/arm/malidp_crtc.c
drivers/gpu/drm/arm/malidp_drv.c
drivers/gpu/drm/arm/malidp_mw.c
drivers/gpu/drm/armada/armada_510.c
drivers/gpu/drm/armada/armada_crtc.c
drivers/gpu/drm/armada/armada_crtc.h
drivers/gpu/drm/armada/armada_drv.c
drivers/gpu/drm/armada/armada_fb.c
drivers/gpu/drm/ast/ast_drv.c
drivers/gpu/drm/ast/ast_fb.c
drivers/gpu/drm/ast/ast_mode.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
drivers/gpu/drm/bochs/Makefile
drivers/gpu/drm/bochs/bochs.h
drivers/gpu/drm/bochs/bochs_drv.c
drivers/gpu/drm/bochs/bochs_fbdev.c [deleted file]
drivers/gpu/drm/bochs/bochs_hw.c
drivers/gpu/drm/bochs/bochs_kms.c
drivers/gpu/drm/bochs/bochs_mm.c
drivers/gpu/drm/bridge/adv7511/adv7511.h
drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
drivers/gpu/drm/bridge/adv7511/adv7533.c
drivers/gpu/drm/bridge/analogix-anx78xx.c
drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
drivers/gpu/drm/bridge/cdns-dsi.c
drivers/gpu/drm/bridge/dumb-vga-dac.c
drivers/gpu/drm/bridge/lvds-encoder.c
drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
drivers/gpu/drm/bridge/nxp-ptn3460.c
drivers/gpu/drm/bridge/panel.c
drivers/gpu/drm/bridge/parade-ps8622.c
drivers/gpu/drm/bridge/sii902x.c
drivers/gpu/drm/bridge/sil-sii8620.c
drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
drivers/gpu/drm/bridge/tc358764.c
drivers/gpu/drm/bridge/tc358767.c
drivers/gpu/drm/bridge/ti-sn65dsi86.c
drivers/gpu/drm/bridge/ti-tfp410.c
drivers/gpu/drm/cirrus/cirrus_drv.c
drivers/gpu/drm/cirrus/cirrus_fbdev.c
drivers/gpu/drm/cirrus/cirrus_mode.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_bridge.c
drivers/gpu/drm/drm_color_mgmt.c
drivers/gpu/drm/drm_connector.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/drm_crtc_internal.h
drivers/gpu/drm/drm_damage_helper.c
drivers/gpu/drm/drm_dp_helper.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_fb_cma_helper.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_flip_work.c
drivers/gpu/drm/drm_framebuffer.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_gem_framebuffer_helper.c
drivers/gpu/drm/drm_irq.c
drivers/gpu/drm/drm_modes.c
drivers/gpu/drm/drm_modeset_helper.c
drivers/gpu/drm/drm_of.c
drivers/gpu/drm/drm_panel.c
drivers/gpu/drm/drm_plane.c
drivers/gpu/drm/drm_probe_helper.c
drivers/gpu/drm/drm_simple_kms_helper.c
drivers/gpu/drm/drm_vblank.c
drivers/gpu/drm/etnaviv/etnaviv_drv.h
drivers/gpu/drm/exynos/exynos_dp.c
drivers/gpu/drm/exynos/exynos_drm_crtc.c
drivers/gpu/drm/exynos/exynos_drm_dpi.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_dsi.c
drivers/gpu/drm/exynos/exynos_drm_fb.c
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
drivers/gpu/drm/exynos/exynos_drm_mic.c
drivers/gpu/drm/exynos/exynos_drm_rotator.c
drivers/gpu/drm/exynos/exynos_drm_scaler.c
drivers/gpu/drm/exynos/exynos_drm_vidi.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/exynos/regs-scaler.h
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
drivers/gpu/drm/gma500/framebuffer.c
drivers/gpu/drm/gma500/psb_drv.c
drivers/gpu/drm/gma500/psb_intel_drv.h
drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
drivers/gpu/drm/i2c/ch7006_drv.c
drivers/gpu/drm/i2c/ch7006_priv.h
drivers/gpu/drm/i2c/sil164_drv.c
drivers/gpu/drm/i2c/tda998x_drv.c
drivers/gpu/drm/i915/Kconfig.debug
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/dvo.h
drivers/gpu/drm/i915/gvt/Makefile
drivers/gpu/drm/i915/gvt/aperture_gm.c
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/gvt/display.c
drivers/gpu/drm/i915/gvt/display.h
drivers/gpu/drm/i915/gvt/dmabuf.c
drivers/gpu/drm/i915/gvt/edid.c
drivers/gpu/drm/i915/gvt/fb_decoder.c
drivers/gpu/drm/i915/gvt/gvt.c
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/hypercall.h
drivers/gpu/drm/i915/gvt/interrupt.c
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/mmio.c
drivers/gpu/drm/i915/gvt/mmio.h
drivers/gpu/drm/i915/gvt/mmio_context.c
drivers/gpu/drm/i915/gvt/mpt.h
drivers/gpu/drm/i915/gvt/sched_policy.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/gvt/scheduler.h
drivers/gpu/drm/i915/gvt/trace.h
drivers/gpu/drm/i915/gvt/vgpu.c
drivers/gpu/drm/i915/i915_active.c [new file with mode: 0644]
drivers/gpu/drm/i915/i915_active.h [new file with mode: 0644]
drivers/gpu/drm/i915/i915_active_types.h [new file with mode: 0644]
drivers/gpu/drm/i915/i915_cmd_parser.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_context.h
drivers/gpu/drm/i915/i915_gem_dmabuf.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_fence_reg.c
drivers/gpu/drm/i915/i915_gem_fence_reg.h
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_gtt.h
drivers/gpu/drm/i915/i915_gem_internal.c
drivers/gpu/drm/i915/i915_gem_object.h
drivers/gpu/drm/i915/i915_gem_shrinker.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_gem_userptr.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_gpu_error.h
drivers/gpu/drm/i915/i915_ioc32.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_params.c
drivers/gpu/drm/i915/i915_params.h
drivers/gpu/drm/i915/i915_pci.c
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/i915/i915_pmu.c
drivers/gpu/drm/i915/i915_pmu.h
drivers/gpu/drm/i915/i915_query.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/i915/i915_request.h
drivers/gpu/drm/i915/i915_reset.c [new file with mode: 0644]
drivers/gpu/drm/i915/i915_reset.h [new file with mode: 0644]
drivers/gpu/drm/i915/i915_scheduler.c
drivers/gpu/drm/i915/i915_selftest.h
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/i915_sysfs.c
drivers/gpu/drm/i915/i915_timeline.c
drivers/gpu/drm/i915/i915_timeline.h
drivers/gpu/drm/i915/i915_trace.h
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/i915_vma.h
drivers/gpu/drm/i915/icl_dsi.c
drivers/gpu/drm/i915/intel_acpi.c
drivers/gpu/drm/i915/intel_atomic.c
drivers/gpu/drm/i915/intel_atomic_plane.c
drivers/gpu/drm/i915/intel_audio.c
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_breadcrumbs.c
drivers/gpu/drm/i915/intel_cdclk.c
drivers/gpu/drm/i915/intel_color.c
drivers/gpu/drm/i915/intel_connector.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_csr.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_device_info.c
drivers/gpu/drm/i915/intel_device_info.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_display.h
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_link_training.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_dpio_phy.c
drivers/gpu/drm/i915/intel_dpll_mgr.c
drivers/gpu/drm/i915/intel_dpll_mgr.h
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dsi.h
drivers/gpu/drm/i915/intel_dsi_vbt.c
drivers/gpu/drm/i915/intel_dvo.c
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_fbc.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_fifo_underrun.c
drivers/gpu/drm/i915/intel_frontbuffer.c
drivers/gpu/drm/i915/intel_gpu_commands.h
drivers/gpu/drm/i915/intel_guc.h
drivers/gpu/drm/i915/intel_guc_fw.c
drivers/gpu/drm/i915/intel_guc_log.c
drivers/gpu/drm/i915/intel_guc_submission.c
drivers/gpu/drm/i915/intel_gvt.c
drivers/gpu/drm/i915/intel_hangcheck.c
drivers/gpu/drm/i915/intel_hdcp.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_hotplug.c
drivers/gpu/drm/i915/intel_huc.c
drivers/gpu/drm/i915/intel_huc_fw.c
drivers/gpu/drm/i915/intel_i2c.c
drivers/gpu/drm/i915/intel_lpe_audio.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lrc.h
drivers/gpu/drm/i915/intel_lspcon.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_mocs.c
drivers/gpu/drm/i915/intel_mocs.h
drivers/gpu/drm/i915/intel_opregion.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pipe_crc.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_psr.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/i915/intel_uc.c
drivers/gpu/drm/i915/intel_uc.h
drivers/gpu/drm/i915/intel_uc_fw.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/i915/intel_vdsc.c
drivers/gpu/drm/i915/intel_wopcm.c
drivers/gpu/drm/i915/intel_workarounds.c
drivers/gpu/drm/i915/selftests/huge_pages.c
drivers/gpu/drm/i915/selftests/i915_active.c [new file with mode: 0644]
drivers/gpu/drm/i915/selftests/i915_gem.c
drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
drivers/gpu/drm/i915/selftests/i915_gem_context.c
drivers/gpu/drm/i915/selftests/i915_gem_evict.c
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
drivers/gpu/drm/i915/selftests/i915_gem_object.c
drivers/gpu/drm/i915/selftests/i915_live_selftests.h
drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
drivers/gpu/drm/i915/selftests/i915_random.c
drivers/gpu/drm/i915/selftests/i915_random.h
drivers/gpu/drm/i915/selftests/i915_request.c
drivers/gpu/drm/i915/selftests/i915_selftest.c
drivers/gpu/drm/i915/selftests/i915_timeline.c
drivers/gpu/drm/i915/selftests/i915_vma.c
drivers/gpu/drm/i915/selftests/igt_live_test.c [new file with mode: 0644]
drivers/gpu/drm/i915/selftests/igt_live_test.h [new file with mode: 0644]
drivers/gpu/drm/i915/selftests/igt_spinner.c
drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c [deleted file]
drivers/gpu/drm/i915/selftests/intel_guc.c
drivers/gpu/drm/i915/selftests/intel_hangcheck.c
drivers/gpu/drm/i915/selftests/intel_lrc.c
drivers/gpu/drm/i915/selftests/intel_workarounds.c
drivers/gpu/drm/i915/selftests/lib_sw_fence.c
drivers/gpu/drm/i915/selftests/lib_sw_fence.h
drivers/gpu/drm/i915/selftests/mock_context.c
drivers/gpu/drm/i915/selftests/mock_engine.c
drivers/gpu/drm/i915/selftests/mock_engine.h
drivers/gpu/drm/i915/selftests/mock_gem_device.c
drivers/gpu/drm/i915/selftests/mock_gtt.c
drivers/gpu/drm/i915/selftests/mock_gtt.h
drivers/gpu/drm/i915/selftests/mock_timeline.c
drivers/gpu/drm/i915/vlv_dsi.c
drivers/gpu/drm/i915/vlv_dsi_pll.c
drivers/gpu/drm/imx/dw_hdmi-imx.c
drivers/gpu/drm/imx/imx-drm-core.c
drivers/gpu/drm/imx/imx-ldb.c
drivers/gpu/drm/imx/imx-tve.c
drivers/gpu/drm/imx/ipuv3-crtc.c
drivers/gpu/drm/imx/parallel-display.c
drivers/gpu/drm/mediatek/mtk_dpi.c
drivers/gpu/drm/mediatek/mtk_drm_crtc.c
drivers/gpu/drm/mediatek/mtk_drm_drv.c
drivers/gpu/drm/mediatek/mtk_drm_fb.c
drivers/gpu/drm/mediatek/mtk_dsi.c
drivers/gpu/drm/mediatek/mtk_hdmi.c
drivers/gpu/drm/meson/meson_crtc.c
drivers/gpu/drm/meson/meson_drv.c
drivers/gpu/drm/meson/meson_dw_hdmi.c
drivers/gpu/drm/meson/meson_venc.c
drivers/gpu/drm/meson/meson_venc_cvbs.c
drivers/gpu/drm/mga/mga_drv.c
drivers/gpu/drm/mgag200/mgag200_fb.c
drivers/gpu/drm/mgag200/mgag200_mode.c
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
drivers/gpu/drm/msm/dsi/dsi.h
drivers/gpu/drm/msm/dsi/dsi_host.c
drivers/gpu/drm/msm/dsi/dsi_manager.c
drivers/gpu/drm/msm/edp/edp_bridge.c
drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_fb.c
drivers/gpu/drm/mxsfb/mxsfb_crtc.c
drivers/gpu/drm/mxsfb/mxsfb_drv.c
drivers/gpu/drm/mxsfb/mxsfb_drv.h
drivers/gpu/drm/mxsfb/mxsfb_out.c
drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/omapdrm/omap_connector.c
drivers/gpu/drm/omapdrm/omap_crtc.c
drivers/gpu/drm/omapdrm/omap_drv.c
drivers/gpu/drm/omapdrm/omap_drv.h
drivers/gpu/drm/omapdrm/omap_encoder.c
drivers/gpu/drm/omapdrm/omap_fb.c
drivers/gpu/drm/omapdrm/omap_fbdev.c
drivers/gpu/drm/panel/Kconfig
drivers/gpu/drm/panel/Makefile
drivers/gpu/drm/panel/panel-innolux-p079zca.c
drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c [new file with mode: 0644]
drivers/gpu/drm/panel/panel-simple.c
drivers/gpu/drm/panel/panel-sitronix-st7701.c [new file with mode: 0644]
drivers/gpu/drm/panel/panel-tpo-tpg110.c [new file with mode: 0644]
drivers/gpu/drm/pl111/pl111_drv.c
drivers/gpu/drm/qxl/Makefile
drivers/gpu/drm/qxl/qxl_cmd.c
drivers/gpu/drm/qxl/qxl_display.c
drivers/gpu/drm/qxl/qxl_draw.c
drivers/gpu/drm/qxl/qxl_drv.c
drivers/gpu/drm/qxl/qxl_drv.h
drivers/gpu/drm/qxl/qxl_dumb.c
drivers/gpu/drm/qxl/qxl_fb.c [deleted file]
drivers/gpu/drm/qxl/qxl_kms.c
drivers/gpu/drm/qxl/qxl_object.c
drivers/gpu/drm/qxl/qxl_prime.c
drivers/gpu/drm/qxl/qxl_ttm.c
drivers/gpu/drm/r128/r128_drv.c
drivers/gpu/drm/radeon/atom.c
drivers/gpu/drm/radeon/radeon_acpi.c
drivers/gpu/drm/radeon/radeon_audio.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_dp_mst.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_irq_kms.c
drivers/gpu/drm/radeon/radeon_legacy_encoders.c
drivers/gpu/drm/rcar-du/Kconfig
drivers/gpu/drm/rcar-du/rcar_du_crtc.c
drivers/gpu/drm/rcar-du/rcar_du_crtc.h
drivers/gpu/drm/rcar-du/rcar_du_drv.c
drivers/gpu/drm/rcar-du/rcar_du_drv.h
drivers/gpu/drm/rcar-du/rcar_du_encoder.c
drivers/gpu/drm/rcar-du/rcar_du_encoder.h
drivers/gpu/drm/rcar-du/rcar_du_group.c
drivers/gpu/drm/rcar-du/rcar_du_kms.c
drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7790.dts
drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7791.dts
drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7793.dts
drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7795.dts
drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7796.dts
drivers/gpu/drm/rcar-du/rcar_du_plane.c
drivers/gpu/drm/rcar-du/rcar_du_plane.h
drivers/gpu/drm/rcar-du/rcar_du_vsp.c
drivers/gpu/drm/rcar-du/rcar_du_vsp.h
drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c
drivers/gpu/drm/rcar-du/rcar_lvds.c
drivers/gpu/drm/rcar-du/rcar_lvds.h [new file with mode: 0644]
drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
drivers/gpu/drm/rockchip/cdn-dp-core.c
drivers/gpu/drm/rockchip/cdn-dp-core.h
drivers/gpu/drm/rockchip/cdn-dp-reg.c
drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
drivers/gpu/drm/rockchip/inno_hdmi.c
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
drivers/gpu/drm/rockchip/rockchip_drm_fb.c
drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
drivers/gpu/drm/rockchip/rockchip_drm_psr.c
drivers/gpu/drm/rockchip/rockchip_drm_psr.h
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.h
drivers/gpu/drm/rockchip/rockchip_lvds.c
drivers/gpu/drm/rockchip/rockchip_rgb.c
drivers/gpu/drm/rockchip/rockchip_vop_reg.c
drivers/gpu/drm/rockchip/rockchip_vop_reg.h
drivers/gpu/drm/savage/savage_state.c
drivers/gpu/drm/shmobile/shmob_drm_crtc.c
drivers/gpu/drm/shmobile/shmob_drm_drv.c
drivers/gpu/drm/shmobile/shmob_drm_kms.c
drivers/gpu/drm/sti/sti_crtc.c
drivers/gpu/drm/sti/sti_drv.c
drivers/gpu/drm/sti/sti_dvo.c
drivers/gpu/drm/sti/sti_hda.c
drivers/gpu/drm/sti/sti_hdmi.c
drivers/gpu/drm/sti/sti_tvout.c
drivers/gpu/drm/stm/drv.c
drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
drivers/gpu/drm/stm/ltdc.c
drivers/gpu/drm/sun4i/sun4i_backend.c
drivers/gpu/drm/sun4i/sun4i_crtc.c
drivers/gpu/drm/sun4i/sun4i_drv.c
drivers/gpu/drm/sun4i/sun4i_frontend.c
drivers/gpu/drm/sun4i/sun4i_frontend.h
drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
drivers/gpu/drm/sun4i/sun4i_layer.c
drivers/gpu/drm/sun4i/sun4i_lvds.c
drivers/gpu/drm/sun4i/sun4i_rgb.c
drivers/gpu/drm/sun4i/sun4i_tcon.c
drivers/gpu/drm/sun4i/sun4i_tv.c
drivers/gpu/drm/sun4i/sun6i_drc.c
drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
drivers/gpu/drm/sun4i/sun8i_mixer.c
drivers/gpu/drm/sun4i/sun8i_ui_layer.c
drivers/gpu/drm/sun4i/sun8i_vi_layer.c
drivers/gpu/drm/tegra/Makefile
drivers/gpu/drm/tegra/drm.c
drivers/gpu/drm/tegra/drm.h
drivers/gpu/drm/tegra/fb.c
drivers/gpu/drm/tegra/hda.c [new file with mode: 0644]
drivers/gpu/drm/tegra/hda.h [new file with mode: 0644]
drivers/gpu/drm/tegra/hdmi.c
drivers/gpu/drm/tegra/hub.c
drivers/gpu/drm/tegra/output.c
drivers/gpu/drm/tegra/sor.c
drivers/gpu/drm/tegra/vic.c
drivers/gpu/drm/tegra/vic.h
drivers/gpu/drm/tilcdc/tilcdc_drv.c
drivers/gpu/drm/tilcdc/tilcdc_drv.h
drivers/gpu/drm/tilcdc/tilcdc_external.c
drivers/gpu/drm/tilcdc/tilcdc_panel.c
drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
drivers/gpu/drm/tinydrm/core/tinydrm-core.c
drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
drivers/gpu/drm/tinydrm/hx8357d.c
drivers/gpu/drm/tinydrm/ili9225.c
drivers/gpu/drm/tinydrm/ili9341.c
drivers/gpu/drm/tinydrm/mi0283qt.c
drivers/gpu/drm/tinydrm/mipi-dbi.c
drivers/gpu/drm/tinydrm/repaper.c
drivers/gpu/drm/tinydrm/st7586.c
drivers/gpu/drm/tinydrm/st7735r.c
drivers/gpu/drm/tve200/tve200_drv.c
drivers/gpu/drm/udl/udl_connector.c
drivers/gpu/drm/udl/udl_drv.c
drivers/gpu/drm/udl/udl_main.c
drivers/gpu/drm/vc4/vc4_crtc.c
drivers/gpu/drm/vc4/vc4_dpi.c
drivers/gpu/drm/vc4/vc4_drv.c
drivers/gpu/drm/vc4/vc4_drv.h
drivers/gpu/drm/vc4/vc4_dsi.c
drivers/gpu/drm/vc4/vc4_hdmi.c
drivers/gpu/drm/vc4/vc4_kms.c
drivers/gpu/drm/vc4/vc4_plane.c
drivers/gpu/drm/vc4/vc4_txp.c
drivers/gpu/drm/vc4/vc4_vec.c
drivers/gpu/drm/via/via_dmablit.c
drivers/gpu/drm/via/via_drv.c
drivers/gpu/drm/virtio/Makefile
drivers/gpu/drm/virtio/virtgpu_display.c
drivers/gpu/drm/virtio/virtgpu_drm_bus.c [deleted file]
drivers/gpu/drm/virtio/virtgpu_drv.c
drivers/gpu/drm/virtio/virtgpu_drv.h
drivers/gpu/drm/virtio/virtgpu_fence.c
drivers/gpu/drm/virtio/virtgpu_ioctl.c
drivers/gpu/drm/virtio/virtgpu_kms.c
drivers/gpu/drm/virtio/virtgpu_plane.c
drivers/gpu/drm/virtio/virtgpu_vq.c
drivers/gpu/drm/vkms/vkms_crtc.c
drivers/gpu/drm/vkms/vkms_drv.c
drivers/gpu/drm/vkms/vkms_output.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
drivers/gpu/drm/xen/xen_drm_front.c
drivers/gpu/drm/xen/xen_drm_front_conn.c
drivers/gpu/drm/xen/xen_drm_front_gem.c
drivers/gpu/drm/xen/xen_drm_front_kms.c
drivers/gpu/drm/zte/zx_drm_drv.c
drivers/gpu/drm/zte/zx_hdmi.c
drivers/gpu/drm/zte/zx_tvenc.c
drivers/gpu/drm/zte/zx_vga.c
drivers/gpu/drm/zte/zx_vou.c
drivers/gpu/host1x/bus.c
drivers/gpu/host1x/cdma.c
drivers/gpu/host1x/cdma.h
drivers/gpu/host1x/dev.c
drivers/gpu/host1x/dev.h
drivers/gpu/host1x/hw/cdma_hw.c
drivers/gpu/host1x/hw/channel_hw.c
drivers/gpu/host1x/hw/host1x06_hardware.h
drivers/gpu/host1x/hw/host1x07_hardware.h
drivers/gpu/host1x/hw/hw_host1x06_channel.h [new file with mode: 0644]
drivers/gpu/host1x/hw/hw_host1x07_channel.h [new file with mode: 0644]
drivers/staging/vboxvideo/vbox_drv.c
drivers/staging/vboxvideo/vbox_fb.c
drivers/staging/vboxvideo/vbox_irq.c
drivers/staging/vboxvideo/vbox_mode.c
include/drm/bridge/dw_hdmi.h
include/drm/bridge/dw_mipi_dsi.h
include/drm/drmP.h
include/drm/drm_atomic.h
include/drm/drm_bridge.h
include/drm/drm_color_mgmt.h
include/drm/drm_connector.h
include/drm/drm_crtc.h
include/drm/drm_crtc_helper.h
include/drm/drm_damage_helper.h
include/drm/drm_device.h
include/drm/drm_dp_helper.h
include/drm/drm_dp_mst_helper.h
include/drm/drm_drv.h
include/drm/drm_edid.h
include/drm/drm_encoder_slave.h
include/drm/drm_fb_cma_helper.h
include/drm/drm_fourcc.h
include/drm/drm_framebuffer.h
include/drm/drm_gem_cma_helper.h
include/drm/drm_gem_framebuffer_helper.h
include/drm/drm_modes.h
include/drm/drm_probe_helper.h [new file with mode: 0644]
include/drm/drm_util.h
include/drm/drm_vblank.h
include/drm/i915_pciids.h
include/drm/tinydrm/mipi-dbi.h
include/drm/tinydrm/tinydrm-helpers.h
include/drm/tinydrm/tinydrm.h
include/linux/dma-fence-array.h
include/linux/hdmi.h
include/linux/mfd/intel_soc_pmic.h
include/trace/events/host1x.h
include/uapi/drm/drm_fourcc.h
include/uapi/drm/i915_drm.h

diff --git a/Documentation/devicetree/bindings/display/arm,komeda.txt b/Documentation/devicetree/bindings/display/arm,komeda.txt
new file mode 100644 (file)
index 0000000..02b2265
--- /dev/null
@@ -0,0 +1,73 @@
+Device Tree bindings for Arm Komeda display driver
+
+Required properties:
+- compatible: Should be "arm,mali-d71"
+- reg: Physical base address and length of the registers in the system
+- interrupts: the interrupt line number of the device in the system
+- clocks: A list of phandle + clock-specifier pairs, one for each entry
+    in 'clock-names'
+- clock-names: A list of clock names. It should contain:
+      - "mclk": for the main processor clock
+      - "pclk": for the APB interface clock
+- #address-cells: Must be 1
+- #size-cells: Must be 0
+
+Required properties for sub-node: pipeline@N
+Each device contains one or two pipeline sub-nodes (at least one); each
+pipeline node should provide the following properties:
+- reg: Zero-indexed identifier for the pipeline
+- clocks: A list of phandle + clock-specifier pairs, one for each entry
+    in 'clock-names'
+- clock-names: should contain:
+      - "pxclk": pixel clock
+      - "aclk": AXI interface clock
+
+- port: each pipeline connects to an encoder input port. The connection is
+    modeled using the OF graph bindings specified in
+    Documentation/devicetree/bindings/graph.txt
+
+Optional properties:
+  - memory-region: phandle to a node describing memory (see
+    Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt)
+    to be used for the framebuffer; if not present, the framebuffer may
+    be located anywhere in memory.
+
+Example:
+/ {
+       ...
+
+       dp0: display@c00000 {
+               #address-cells = <1>;
+               #size-cells = <0>;
+               compatible = "arm,mali-d71";
+               reg = <0xc00000 0x20000>;
+               interrupts = <0 168 4>;
+               clocks = <&dpu_mclk>, <&dpu_aclk>;
+               clock-names = "mclk", "pclk";
+
+               dp0_pipe0: pipeline@0 {
+                       clocks = <&fpgaosc2>, <&dpu_aclk>;
+                       clock-names = "pxclk", "aclk";
+                       reg = <0>;
+
+                       port {
+                               dp0_pipe0_out: endpoint {
+                                       remote-endpoint = <&db_dvi0_in>;
+                               };
+                       };
+               };
+
+               dp0_pipe1: pipeline@1 {
+                       clocks = <&fpgaosc2>, <&dpu_aclk>;
+                       clock-names = "pxclk", "aclk";
+                       reg = <1>;
+
+                       port {
+                               dp0_pipe1_out: endpoint {
+                                       remote-endpoint = <&db_dvi1_in>;
+                               };
+                       };
+               };
+       };
+       ...
+};
index 50220190c20330a47a1425e2e2798e59c8f1652b..60091db5dfa5218958ff35133df0e3c02b4fdc5c 100644 (file)
@@ -22,13 +22,11 @@ among others.
 
 Required properties:
 
-- compatible: Must be one or more of the following
-  - "ti,ds90c185" for the TI DS90C185 FPD-Link Serializer
-  - "lvds-encoder" for a generic LVDS encoder device
+- compatible: Must be "lvds-encoder"
 
-  When compatible with the generic version, nodes must list the
-  device-specific version corresponding to the device first
-  followed by the generic version.
+  Any encoder compatible with this generic binding, but with additional
+  properties not listed here, must list a device specific compatible first
+  followed by this generic compatible.
 
 Required nodes:
 
@@ -44,8 +42,6 @@ Example
 
 lvds-encoder {
        compatible = "lvds-encoder";
-       #address-cells = <1>;
-       #size-cells = <0>;
 
        ports {
                #address-cells = <1>;
index ba5469dd09f35393e339af8000d6ca776d1c2f31..900a884ad9f5d2e27742af3472b7147cad4b0690 100644 (file)
@@ -8,6 +8,8 @@ Required properties:
 
 - compatible : Shall contain one of
   - "renesas,r8a7743-lvds" for R8A7743 (RZ/G1M) compatible LVDS encoders
+  - "renesas,r8a7744-lvds" for R8A7744 (RZ/G1N) compatible LVDS encoders
+  - "renesas,r8a774c0-lvds" for R8A774C0 (RZ/G2E) compatible LVDS encoders
   - "renesas,r8a7790-lvds" for R8A7790 (R-Car H2) compatible LVDS encoders
   - "renesas,r8a7791-lvds" for R8A7791 (R-Car M2-W) compatible LVDS encoders
   - "renesas,r8a7793-lvds" for R8A7793 (R-Car M2-N) compatible LVDS encoders
@@ -25,7 +27,7 @@ Required properties:
 - clock-names: Name of the clocks. This property is model-dependent.
  - The functional clock, which is mandatory for all models, shall be listed
     first, and shall be named "fck".
-  - On R8A77990 and R8A77995, the LVDS encoder can use the EXTAL or
+  - On R8A77990, R8A77995 and R8A774C0, the LVDS encoder can use the EXTAL or
     DU_DOTCLKINx clocks. Those clocks are optional. When supplied they must be
     named "extal" and "dclkin.x" respectively, with "x" being the DU_DOTCLKIN
     numerical index.
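
As a rough illustration of the clock description above, a minimal sketch
of an R8A774C0 LVDS encoder node (the node name, register address and
clock phandles below are placeholders, not taken from this patch):

	lvds0: lvds-encoder@feb90000 {
		compatible = "renesas,r8a774c0-lvds";
		reg = <0 0xfeb90000 0 0x20>;
		/* The mandatory functional clock is listed first and named
		 * "fck"; the optional external dot clock is named "extal". */
		clocks = <&cpg CPG_MOD 727>, <&extal_clk>;
		clock-names = "fck", "extal";
	};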
index 527e236e9a2a126dcdd814e3b50c972a7145fe44..fee3c88e1a176e6388a49806ed65dca4471e7d42 100644 (file)
@@ -10,7 +10,7 @@ Required properties:
 
 Optional properties:
 
-- pwdn-gpios: Power down control GPIO
+- powerdown-gpios: Power down control GPIO (the /PWDN pin, active low).
 
 Required nodes:
 
diff --git a/Documentation/devicetree/bindings/display/bridge/ti,ds90c185.txt b/Documentation/devicetree/bindings/display/bridge/ti,ds90c185.txt
new file mode 100644 (file)
index 0000000..e575f99
--- /dev/null
@@ -0,0 +1,55 @@
+Texas Instruments FPD-Link (LVDS) Serializer
+--------------------------------------------
+
+The DS90C185 and DS90C187 are low-power serializers for portable
+battery-powered applications that reduce the size of the RGB
+interface between the host GPU and the display.
+
+Required properties:
+
+- compatible: Should be
+  "ti,ds90c185", "lvds-encoder"  for the TI DS90C185 FPD-Link Serializer
+  "ti,ds90c187", "lvds-encoder"  for the TI DS90C187 FPD-Link Serializer
+
+Optional properties:
+
+- powerdown-gpios: Power down control GPIO (the PDB pin, active-low)
+
+Required nodes:
+
+The devices have two video ports. Their connections are modeled using the OF
+graph bindings specified in Documentation/devicetree/bindings/graph.txt.
+
+- Video port 0 for parallel input
+- Video port 1 for LVDS output
+
+
+Example
+-------
+
+lvds-encoder {
+       compatible = "ti,ds90c185", "lvds-encoder";
+
+       powerdown-gpios = <&gpio 17 GPIO_ACTIVE_LOW>;
+
+       ports {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               port@0 {
+                       reg = <0>;
+
+                       lvds_enc_in: endpoint {
+                               remote-endpoint = <&lcdc_out_rgb>;
+                       };
+               };
+
+               port@1 {
+                       reg = <1>;
+
+                       lvds_enc_out: endpoint {
+                               remote-endpoint = <&lvds_panel_in>;
+                       };
+               };
+       };
+};
diff --git a/Documentation/devicetree/bindings/display/panel/lemaker,bl035-rgb-002.txt b/Documentation/devicetree/bindings/display/panel/lemaker,bl035-rgb-002.txt
new file mode 100644 (file)
index 0000000..74ee7ea
--- /dev/null
@@ -0,0 +1,12 @@
+LeMaker BL035-RGB-002 3.5" QVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "lemaker,bl035-rgb-002"
+- power-supply: as specified in the base binding
+
+Optional properties:
+- backlight: as specified in the base binding
+- enable-gpios: as specified in the base binding
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
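
A minimal node sketch under that convention (the regulator and GPIO
phandles are placeholders, not taken from this patch):

	panel {
		compatible = "lemaker,bl035-rgb-002";
		power-supply = <&vcc3v3_lcd>;
		backlight = <&backlight>;
		enable-gpios = <&gpio2 5 GPIO_ACTIVE_HIGH>;
	};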
diff --git a/Documentation/devicetree/bindings/display/panel/pda,91-00156-a0.txt b/Documentation/devicetree/bindings/display/panel/pda,91-00156-a0.txt
new file mode 100644 (file)
index 0000000..1639fb1
--- /dev/null
@@ -0,0 +1,14 @@
+PDA 91-00156-A0 5.0" WVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "pda,91-00156-a0"
+- power-supply: this panel requires a single power supply; a phandle to a
+regulator needs to be specified here. Compatible with the panel-common
+binding specified in panel-common.txt in this directory.
+- backlight: this panel's backlight is controlled by an external backlight
+controller; a phandle to this controller needs to be specified here.
+Compatible with the panel-common binding specified in panel-common.txt
+in this directory.
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
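
As with the other simple-panel bindings, a minimal sketch (the phandle
targets are placeholders, not taken from this patch):

	panel {
		compatible = "pda,91-00156-a0";
		power-supply = <&lcd_supply>;
		backlight = <&lcd_backlight>;
	};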
diff --git a/Documentation/devicetree/bindings/display/panel/sitronix,st7701.txt b/Documentation/devicetree/bindings/display/panel/sitronix,st7701.txt
new file mode 100644 (file)
index 0000000..ccd1759
--- /dev/null
@@ -0,0 +1,30 @@
+Sitronix ST7701 based LCD panels
+
+The ST7701 is designed for small and medium size TFT LCD displays and
+supports resolutions up to 480RGBX864. It provides several system
+interfaces such as MIPI, RGB and SPI.
+
+The Techstar TS8550B is a 480x854, 2-lane MIPI DSI LCD panel built
+around the ST7701.
+
+Required properties:
+- compatible: must be "sitronix,st7701" and one of
+  * "techstar,ts8550b"
+- reset-gpios: a GPIO phandle for the reset pin
+
+Required properties for techstar,ts8550b:
+- reg: DSI virtual channel used by that screen
+- VCC-supply: analog regulator for MIPI circuit
+- IOVCC-supply: I/O system regulator
+
+Optional properties:
+- backlight: phandle for the backlight control.
+
+Example:
+
+panel@0 {
+       compatible = "techstar,ts8550b", "sitronix,st7701";
+       reg = <0>;
+       VCC-supply = <&reg_dldo2>;
+       IOVCC-supply = <&reg_dldo2>;
+       reset-gpios = <&pio 3 24 GPIO_ACTIVE_HIGH>; /* LCD-RST: PD24 */
+       backlight = <&backlight>;
+};
index 3c855d9f27193bcb03f365cc8c4a3a78a5d478a0..aedb22b4d1613d1ff69b833a5babd227892538e0 100644 (file)
@@ -7,6 +7,7 @@ Required Properties:
     - "renesas,du-r8a7744" for R8A7744 (RZ/G1N) compatible DU
     - "renesas,du-r8a7745" for R8A7745 (RZ/G1E) compatible DU
     - "renesas,du-r8a77470" for R8A77470 (RZ/G1C) compatible DU
+    - "renesas,du-r8a774c0" for R8A774C0 (RZ/G2E) compatible DU
     - "renesas,du-r8a7779" for R8A7779 (R-Car H1) compatible DU
     - "renesas,du-r8a7790" for R8A7790 (R-Car H2) compatible DU
     - "renesas,du-r8a7791" for R8A7791 (R-Car M2-W) compatible DU
@@ -57,6 +58,7 @@ corresponding to each DU output.
  R8A7744 (RZ/G1N)       DPAD 0         LVDS 0         -              -
  R8A7745 (RZ/G1E)       DPAD 0         DPAD 1         -              -
  R8A77470 (RZ/G1C)      DPAD 0         DPAD 1         LVDS 0         -
+ R8A774C0 (RZ/G2E)      DPAD 0         LVDS 0         LVDS 1         -
  R8A7779 (R-Car H1)     DPAD 0         DPAD 1         -              -
  R8A7790 (R-Car H2)     DPAD 0         LVDS 0         LVDS 1         -
  R8A7791 (R-Car M2-W)   DPAD 0         LVDS 0         -              -
index b79e5769f0ae7d1b28835d73f9886d83d42fb389..4f58c5a2d19501c92b4388dbb9acaacac8001759 100644 (file)
@@ -10,6 +10,7 @@ Required properties:
                "rockchip,rk3126-vop";
                "rockchip,px30-vop-lit";
                "rockchip,px30-vop-big";
+               "rockchip,rk3066-vop";
                "rockchip,rk3188-vop";
                "rockchip,rk3288-vop";
                "rockchip,rk3368-vop";
index f426bdb42f18ce8d2f153bbcead91f7b593ad13d..31ab72cba3d47e5ba47701342404c8cfa53809c4 100644 (file)
@@ -156,6 +156,7 @@ Required properties:
    * allwinner,sun6i-a31-tcon
    * allwinner,sun6i-a31s-tcon
    * allwinner,sun7i-a20-tcon
+   * allwinner,sun8i-a23-tcon
    * allwinner,sun8i-a33-tcon
    * allwinner,sun8i-a83t-tcon-lcd
    * allwinner,sun8i-a83t-tcon-tv
@@ -276,6 +277,7 @@ Required properties:
   - compatible: value must be one of:
     * allwinner,sun6i-a31-drc
     * allwinner,sun6i-a31s-drc
+    * allwinner,sun8i-a23-drc
     * allwinner,sun8i-a33-drc
     * allwinner,sun9i-a80-drc
   - reg: base address and size of the memory-mapped region.
@@ -303,6 +305,7 @@ Required properties:
     * allwinner,sun5i-a13-display-backend
     * allwinner,sun6i-a31-display-backend
     * allwinner,sun7i-a20-display-backend
+    * allwinner,sun8i-a23-display-backend
     * allwinner,sun8i-a33-display-backend
     * allwinner,sun9i-a80-display-backend
   - reg: base address and size of the memory-mapped region.
@@ -360,6 +363,7 @@ Required properties:
     * allwinner,sun5i-a13-display-frontend
     * allwinner,sun6i-a31-display-frontend
     * allwinner,sun7i-a20-display-frontend
+    * allwinner,sun8i-a23-display-frontend
     * allwinner,sun8i-a33-display-frontend
     * allwinner,sun9i-a80-display-frontend
   - reg: base address and size of the memory-mapped region.
@@ -419,6 +423,7 @@ Required properties:
     * allwinner,sun6i-a31-display-engine
     * allwinner,sun6i-a31s-display-engine
     * allwinner,sun7i-a20-display-engine
+    * allwinner,sun8i-a23-display-engine
     * allwinner,sun8i-a33-display-engine
     * allwinner,sun8i-a83t-display-engine
     * allwinner,sun8i-h3-display-engine
index 593be44a53c9527774c35ca9331f9b25e23d97cd..9999255ac5b611106e9bf7b6e55ade37096580c0 100644 (file)
@@ -238,6 +238,9 @@ of the following host1x client modules:
   - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection
   - nvidia,edid: supplies a binary EDID blob
   - nvidia,panel: phandle of a display panel
+  - nvidia,xbar-cfg: 5 cells containing the crossbar configuration. Each lane
+    of the SOR, identified by the cell's index, is mapped via the crossbar to
+    the pad specified by the cell's value.
 
   Optional properties when driving an eDP output:
  - nvidia,dpaux: phandle to a DisplayPort AUX interface
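
For the crossbar property described above, a hypothetical mapping that
swaps pads 0 and 2 while leaving the other lanes untouched would read
(the SOR node address is a placeholder):

	sor@54540000 {
		/* cell index = SOR lane, cell value = pad */
		nvidia,xbar-cfg = <2 1 0 3 4>;
	};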
index 82cd1ed0be9317cef6b7ca457f5f904d9d0e16d7..3aca2578da0bd7c3aa938df1de046cb95e86905f 100644 (file)
@@ -2,9 +2,10 @@
 
 Required properties:
   - compatible : value should be one of the following:
-       (a) "samsung,exynos4210-rotator" for Rotator IP in Exynos4210
-       (b) "samsung,exynos4212-rotator" for Rotator IP in Exynos4212/4412
-       (c) "samsung,exynos5250-rotator" for Rotator IP in Exynos5250
+       * "samsung,s5pv210-rotator" for Rotator IP in S5PV210
+       * "samsung,exynos4210-rotator" for Rotator IP in Exynos4210
+       * "samsung,exynos4212-rotator" for Rotator IP in Exynos4212/4412
+       * "samsung,exynos5250-rotator" for Rotator IP in Exynos5250
 
   - reg : Physical base address of the IP registers and length of memory
          mapped region.
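
A minimal node sketch for one of the listed variants, showing only the
properties documented above (the address and size are placeholders):

	rotator@12810000 {
		compatible = "samsung,exynos4210-rotator";
		reg = <0x12810000 0x1000>;
	};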
index 389508584f48d36414894f9da0f8e1044ef25593..4f225ce815d82ba37cc7b78e5cef7f9cd49e0207 100644 (file)
@@ -211,6 +211,7 @@ laird       Laird PLC
 lantiq Lantiq Semiconductor
 lattice        Lattice Semiconductor
 lego   LEGO Systems A/S
+lemaker        Shenzhen LeMaker Technology Co., Ltd.
 lenovo Lenovo Group Ltd.
 lg     LG Corporation
 libretech      Shenzhen Libre Technology Co., Ltd
@@ -297,6 +298,7 @@ ovti        OmniVision Technologies
 oxsemi Oxford Semiconductor, Ltd.
 panasonic      Panasonic Corporation
 parade Parade Technologies Inc.
+pda    Precision Design Associates, Inc.
 pericom        Pericom Technology Inc.
 pervasive      Pervasive Displays, Inc.
 phicomm PHICOMM Co., Ltd.
diff --git a/Documentation/gpu/afbc.rst b/Documentation/gpu/afbc.rst
new file mode 100644 (file)
index 0000000..4d38dc4
--- /dev/null
@@ -0,0 +1,235 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+===================================
+ Arm Framebuffer Compression (AFBC)
+===================================
+
+AFBC is a proprietary lossless image compression protocol and format.
+It provides fine-grained random access and minimizes the amount of
+data transferred between IP blocks.
+
+AFBC can be enabled on drivers which support it by using the AFBC
+format modifiers defined in drm_fourcc.h. See DRM_FORMAT_MOD_ARM_AFBC(*).
+
+All users of the AFBC modifiers must follow the usage guidelines laid
+out in this document, to ensure compatibility across different AFBC
+producers and consumers.
+
+Components and Ordering
+=======================
+
+AFBC streams can contain several components - where a component
+corresponds to a color channel (i.e. R, G, B, X, A, Y, Cb, Cr).
+The assignment of input/output color channels must be consistent
+between the encoder and the decoder for correct operation, otherwise
+the consumer will interpret the decoded data incorrectly.
+
+Furthermore, when the lossless colorspace transform is used
+(AFBC_FORMAT_MOD_YTR, which should be enabled for RGB buffers for
+maximum compression efficiency), the component order must be:
+
+ * Component 0: R
+ * Component 1: G
+ * Component 2: B
+
+The component ordering is communicated via the fourcc code in the
+fourcc:modifier pair. In general, component '0' is considered to
+reside in the least-significant bits of the corresponding linear
+format. For example, COMP(bits):
+
+ * DRM_FORMAT_ABGR8888
+
+   * Component 0: R(8)
+   * Component 1: G(8)
+   * Component 2: B(8)
+   * Component 3: A(8)
+
+ * DRM_FORMAT_BGR888
+
+   * Component 0: R(8)
+   * Component 1: G(8)
+   * Component 2: B(8)
+
+ * DRM_FORMAT_YUYV
+
+   * Component 0: Y(8)
+   * Component 1: Cb(8, 2x1 subsampled)
+   * Component 2: Cr(8, 2x1 subsampled)
+
+In AFBC, 'X' components are not treated any differently from any other
+component. Therefore, an AFBC buffer with fourcc DRM_FORMAT_XBGR8888
+encodes with 4 components, like so:
+
+ * DRM_FORMAT_XBGR8888
+
+   * Component 0: R(8)
+   * Component 1: G(8)
+   * Component 2: B(8)
+   * Component 3: X(8)
+
+Please note, however, that the inclusion of a "wasted" 'X' channel is
+bad for compression efficiency, and so it's recommended to avoid
+formats containing 'X' bits. If a fourth component is
+required/expected by the encoder/decoder, then it is recommended to
+instead use an equivalent format with alpha, setting all alpha bits to
+'1'. If there is no requirement for a fourth component, then a format
+which doesn't include alpha can be used, e.g. DRM_FORMAT_BGR888.
+
+Number of Planes
+================
+
+Formats which are typically multi-planar in linear layouts (e.g. YUV
+420) can be encoded into one or multiple AFBC planes. As with
+component order, the encoder and decoder must agree about the number
+of planes in order to correctly decode the buffer. The fourcc code is
+used to determine the number of encoded planes in an AFBC buffer,
+matching the number of planes for the linear (unmodified) format.
+Within each plane, the component ordering also follows the fourcc
+code:
+
+For example:
+
+ * DRM_FORMAT_YUYV: nplanes = 1
+
+   * Plane 0:
+
+     * Component 0: Y(8)
+     * Component 1: Cb(8, 2x1 subsampled)
+     * Component 2: Cr(8, 2x1 subsampled)
+
+ * DRM_FORMAT_NV12: nplanes = 2
+
+   * Plane 0:
+
+     * Component 0: Y(8)
+
+   * Plane 1:
+
+     * Component 0: Cb(8, 2x1 subsampled)
+     * Component 1: Cr(8, 2x1 subsampled)
+
+Cross-device interoperability
+=============================
+
+For maximum compatibility across devices, the table below defines
+canonical formats for use between AFBC-enabled devices. Formats which
+are listed here must be used exactly as specified when using the AFBC
+modifiers. Formats which are not listed should be avoided.
+
+.. flat-table:: AFBC formats
+
+   * - Fourcc code
+     - Description
+     - Planes/Components
+
+   * - DRM_FORMAT_ABGR2101010
+     - 10-bit per component RGB, with 2-bit alpha
+     - Plane 0: 4 components
+              * Component 0: R(10)
+              * Component 1: G(10)
+              * Component 2: B(10)
+              * Component 3: A(2)
+
+   * - DRM_FORMAT_ABGR8888
+     - 8-bit per component RGB, with 8-bit alpha
+     - Plane 0: 4 components
+              * Component 0: R(8)
+              * Component 1: G(8)
+              * Component 2: B(8)
+              * Component 3: A(8)
+
+   * - DRM_FORMAT_BGR888
+     - 8-bit per component RGB
+     - Plane 0: 3 components
+              * Component 0: R(8)
+              * Component 1: G(8)
+              * Component 2: B(8)
+
+   * - DRM_FORMAT_BGR565
+     - 5/6-bit per component RGB
+     - Plane 0: 3 components
+              * Component 0: R(5)
+              * Component 1: G(6)
+              * Component 2: B(5)
+
+   * - DRM_FORMAT_ABGR1555
+     - 5-bit per component RGB, with 1-bit alpha
+     - Plane 0: 4 components
+              * Component 0: R(5)
+              * Component 1: G(5)
+              * Component 2: B(5)
+              * Component 3: A(1)
+
+   * - DRM_FORMAT_VUY888
+     - 8-bit per component YCbCr 444, single plane
+     - Plane 0: 3 components
+              * Component 0: Y(8)
+              * Component 1: Cb(8)
+              * Component 2: Cr(8)
+
+   * - DRM_FORMAT_VUY101010
+     - 10-bit per component YCbCr 444, single plane
+     - Plane 0: 3 components
+              * Component 0: Y(10)
+              * Component 1: Cb(10)
+              * Component 2: Cr(10)
+
+   * - DRM_FORMAT_YUYV
+     - 8-bit per component YCbCr 422, single plane
+     - Plane 0: 3 components
+              * Component 0: Y(8)
+              * Component 1: Cb(8, 2x1 subsampled)
+              * Component 2: Cr(8, 2x1 subsampled)
+
+   * - DRM_FORMAT_NV16
+     - 8-bit per component YCbCr 422, two plane
+     - Plane 0: 1 component
+              * Component 0: Y(8)
+       Plane 1: 2 components
+              * Component 0: Cb(8, 2x1 subsampled)
+              * Component 1: Cr(8, 2x1 subsampled)
+
+   * - DRM_FORMAT_Y210
+     - 10-bit per component YCbCr 422, single plane
+     - Plane 0: 3 components
+              * Component 0: Y(10)
+              * Component 1: Cb(10, 2x1 subsampled)
+              * Component 2: Cr(10, 2x1 subsampled)
+
+   * - DRM_FORMAT_P210
+     - 10-bit per component YCbCr 422, two plane
+     - Plane 0: 1 component
+              * Component 0: Y(10)
+       Plane 1: 2 components
+              * Component 0: Cb(10, 2x1 subsampled)
+              * Component 1: Cr(10, 2x1 subsampled)
+
+   * - DRM_FORMAT_YUV420_8BIT
+     - 8-bit per component YCbCr 420, single plane
+     - Plane 0: 3 components
+              * Component 0: Y(8)
+              * Component 1: Cb(8, 2x2 subsampled)
+              * Component 2: Cr(8, 2x2 subsampled)
+
+   * - DRM_FORMAT_YUV420_10BIT
+     - 10-bit per component YCbCr 420, single plane
+     - Plane 0: 3 components
+              * Component 0: Y(10)
+              * Component 1: Cb(10, 2x2 subsampled)
+              * Component 2: Cr(10, 2x2 subsampled)
+
+   * - DRM_FORMAT_NV12
+     - 8-bit per component YCbCr 420, two plane
+     - Plane 0: 1 component
+              * Component 0: Y(8)
+       Plane 1: 2 components
+              * Component 0: Cb(8, 2x2 subsampled)
+              * Component 1: Cr(8, 2x2 subsampled)
+
+   * - DRM_FORMAT_P010
+     - 10-bit per component YCbCr 420, two plane
+     - Plane 0: 1 component
+              * Component 0: Y(10)
+       Plane 1: 2 components
+              * Component 0: Cb(10, 2x2 subsampled)
+              * Component 1: Cr(10, 2x2 subsampled)
diff --git a/Documentation/gpu/dp-mst/topology-figure-1.dot b/Documentation/gpu/dp-mst/topology-figure-1.dot
new file mode 100644 (file)
index 0000000..157e17c
--- /dev/null
@@ -0,0 +1,52 @@
+digraph T {
+    /* Make sure our payloads are always drawn below the driver node */
+    subgraph cluster_driver {
+        fillcolor = grey;
+        style = filled;
+        driver -> {payload1, payload2} [dir=none];
+    }
+
+    /* Driver malloc references */
+    edge [style=dashed];
+    driver -> port1;
+    driver -> port2;
+    driver -> port3:e;
+    driver -> port4;
+
+    payload1:s -> port1:e;
+    payload2:s -> port3:e;
+    edge [style=""];
+
+    subgraph cluster_topology {
+        label="Topology Manager";
+        labelloc=bottom;
+
+        /* Topology references */
+        mstb1 -> {port1, port2};
+        port1 -> mstb2;
+        port2 -> mstb3 -> {port3, port4};
+        port3 -> mstb4;
+
+        /* Malloc references */
+        edge [style=dashed;dir=back];
+        mstb1 -> {port1, port2};
+        port1 -> mstb2;
+        port2 -> mstb3 -> {port3, port4};
+        port3 -> mstb4;
+    }
+
+    driver [label="DRM driver";style=filled;shape=box;fillcolor=lightblue];
+
+    payload1 [label="Payload #1";style=filled;shape=box;fillcolor=lightblue];
+    payload2 [label="Payload #2";style=filled;shape=box;fillcolor=lightblue];
+
+    mstb1 [label="MSTB #1";style=filled;fillcolor=palegreen;shape=oval];
+    mstb2 [label="MSTB #2";style=filled;fillcolor=palegreen;shape=oval];
+    mstb3 [label="MSTB #3";style=filled;fillcolor=palegreen;shape=oval];
+    mstb4 [label="MSTB #4";style=filled;fillcolor=palegreen;shape=oval];
+
+    port1 [label="Port #1";shape=oval];
+    port2 [label="Port #2";shape=oval];
+    port3 [label="Port #3";shape=oval];
+    port4 [label="Port #4";shape=oval];
+}
diff --git a/Documentation/gpu/dp-mst/topology-figure-2.dot b/Documentation/gpu/dp-mst/topology-figure-2.dot
new file mode 100644 (file)
index 0000000..4243dd1
--- /dev/null
@@ -0,0 +1,56 @@
+digraph T {
+    /* Make sure our payloads are always drawn below the driver node */
+    subgraph cluster_driver {
+        fillcolor = grey;
+        style = filled;
+        driver -> {payload1, payload2} [dir=none];
+    }
+
+    /* Driver malloc references */
+    edge [style=dashed];
+    driver -> port1;
+    driver -> port2;
+    driver -> port3:e;
+    driver -> port4 [color=red];
+
+    payload1:s -> port1:e;
+    payload2:s -> port3:e;
+    edge [style=""];
+
+    subgraph cluster_topology {
+        label="Topology Manager";
+        labelloc=bottom;
+
+        /* Topology references */
+        mstb1 -> {port1, port2};
+        port1 -> mstb2;
+        edge [color=red];
+        port2 -> mstb3 -> {port3, port4};
+        port3 -> mstb4;
+        edge [color=""];
+
+        /* Malloc references */
+        edge [style=dashed;dir=back];
+        mstb1 -> {port1, port2};
+        port1 -> mstb2;
+        port2 -> mstb3 -> port3;
+        edge [color=red];
+        mstb3 -> port4;
+        port3 -> mstb4;
+    }
+
+    mstb1 [label="MSTB #1";style=filled;fillcolor=palegreen];
+    mstb2 [label="MSTB #2";style=filled;fillcolor=palegreen];
+    mstb3 [label="MSTB #3";style=filled;fillcolor=palegreen];
+    mstb4 [label="MSTB #4";style=filled;fillcolor=grey];
+
+    port1 [label="Port #1"];
+    port2 [label="Port #2"];
+    port3 [label="Port #3"];
+    port4 [label="Port #4";style=filled;fillcolor=grey];
+
+    driver [label="DRM driver";style=filled;shape=box;fillcolor=lightblue];
+
+    payload1 [label="Payload #1";style=filled;shape=box;fillcolor=lightblue];
+    payload2 [label="Payload #2";style=filled;shape=box;fillcolor=lightblue];
+}
diff --git a/Documentation/gpu/dp-mst/topology-figure-3.dot b/Documentation/gpu/dp-mst/topology-figure-3.dot
new file mode 100644 (file)
index 0000000..6cd78d0
--- /dev/null
@@ -0,0 +1,59 @@
+digraph T {
+    /* Make sure our payloads are always drawn below the driver node */
+    subgraph cluster_driver {
+        fillcolor = grey;
+        style = filled;
+        edge [dir=none];
+        driver -> payload1;
+        driver -> payload2 [penwidth=3];
+        edge [dir=""];
+    }
+
+    /* Driver malloc references */
+    edge [style=dashed];
+    driver -> port1;
+    driver -> port2;
+    driver -> port3:e;
+    driver -> port4 [color=grey];
+    payload1:s -> port1:e;
+    payload2:s -> port3:e [penwidth=3];
+    edge [style=""];
+
+    subgraph cluster_topology {
+        label="Topology Manager";
+        labelloc=bottom;
+
+        /* Topology references */
+        mstb1 -> {port1, port2};
+        port1 -> mstb2;
+        edge [color=grey];
+        port2 -> mstb3 -> {port3, port4};
+        port3 -> mstb4;
+        edge [color=""];
+
+        /* Malloc references */
+        edge [style=dashed;dir=back];
+        mstb1 -> {port1, port2};
+        port1 -> mstb2;
+        port2 -> mstb3 [penwidth=3];
+        mstb3 -> port3 [penwidth=3];
+        edge [color=grey];
+        mstb3 -> port4;
+        port3 -> mstb4;
+    }
+
+    mstb1 [label="MSTB #1";style=filled;fillcolor=palegreen];
+    mstb2 [label="MSTB #2";style=filled;fillcolor=palegreen];
+    mstb3 [label="MSTB #3";style=filled;fillcolor=palegreen;penwidth=3];
+    mstb4 [label="MSTB #4";style=filled;fillcolor=grey];
+
+    port1 [label="Port #1"];
+    port2 [label="Port #2";penwidth=5];
+    port3 [label="Port #3";penwidth=3];
+    port4 [label="Port #4";style=filled;fillcolor=grey];
+
+    driver [label="DRM driver";style=filled;shape=box;fillcolor=lightblue];
+
+    payload1 [label="Payload #1";style=filled;shape=box;fillcolor=lightblue];
+    payload2 [label="Payload #2";style=filled;shape=box;fillcolor=lightblue;penwidth=3];
+}
index 7c1672118a73f4c59f066ef78fa95badb6a1ce43..044a7025477c167e278e95eb15692cafd45c9734 100644 (file)
@@ -17,6 +17,8 @@ GPU Driver Documentation
    vkms
    bridge/dw-hdmi
    xen-front
+   afbc
+   komeda-kms
 
 .. only::  subproject and html
 
index 5ee9674fb9e981db2c17c0b6f1a08402b5ab5290..3ae23a5454aca4822944296acac375b658e63cf7 100644 (file)
@@ -39,68 +39,6 @@ sections.
 Driver Information
 ------------------
 
-Driver Features
-~~~~~~~~~~~~~~~
-
-Drivers inform the DRM core about their requirements and supported
-features by setting appropriate flags in the driver_features field.
-Since those flags influence the DRM core behaviour since registration
-time, most of them must be set to registering the :c:type:`struct
-drm_driver <drm_driver>` instance.
-
-u32 driver_features;
-
-DRIVER_USE_AGP
-    Driver uses AGP interface, the DRM core will manage AGP resources.
-
-DRIVER_LEGACY
-    Denote a legacy driver using shadow attach. Don't use.
-
-DRIVER_KMS_LEGACY_CONTEXT
-    Used only by nouveau for backwards compatibility with existing userspace.
-    Don't use.
-
-DRIVER_PCI_DMA
-    Driver is capable of PCI DMA, mapping of PCI DMA buffers to
-    userspace will be enabled. Deprecated.
-
-DRIVER_SG
-    Driver can perform scatter/gather DMA, allocation and mapping of
-    scatter/gather buffers will be enabled. Deprecated.
-
-DRIVER_HAVE_DMA
-    Driver supports DMA, the userspace DMA API will be supported.
-    Deprecated.
-
-DRIVER_HAVE_IRQ; DRIVER_IRQ_SHARED
-    DRIVER_HAVE_IRQ indicates whether the driver has an IRQ handler
-    managed by the DRM Core. The core will support simple IRQ handler
-    installation when the flag is set. The installation process is
-    described in ?.
-
-    DRIVER_IRQ_SHARED indicates whether the device & handler support
-    shared IRQs (note that this is required of PCI drivers).
-
-DRIVER_GEM
-    Driver use the GEM memory manager.
-
-DRIVER_MODESET
-    Driver supports mode setting interfaces (KMS).
-
-DRIVER_PRIME
-    Driver implements DRM PRIME buffer sharing.
-
-DRIVER_RENDER
-    Driver supports dedicated render nodes.
-
-DRIVER_ATOMIC
-    Driver supports atomic properties. In this case the driver must
-    implement appropriate obj->atomic_get_property() vfuncs for any
-    modeset objects with driver specific properties.
-
-DRIVER_SYNCOBJ
-    Driver support drm sync objects.
-
 Major, Minor and Patchlevel
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
@@ -143,6 +81,9 @@ Device Instance and Driver Handling
 .. kernel-doc:: drivers/gpu/drm/drm_drv.c
    :doc: driver instance overview
 
+.. kernel-doc:: include/drm/drm_device.h
+   :internal:
+
 .. kernel-doc:: include/drm/drm_drv.h
    :internal:
 
@@ -230,6 +171,15 @@ Printer
 .. kernel-doc:: drivers/gpu/drm/drm_print.c
    :export:
 
+Utilities
+---------
+
+.. kernel-doc:: include/drm/drm_util.h
+   :doc: drm utils
+
+.. kernel-doc:: include/drm/drm_util.h
+   :internal:
+
 
 Legacy Support Code
 ===================
index b422eb8edf1627baba10a231560eca754e31e846..17ca7f8bf3d3c21660e5c17638642955b82f9045 100644 (file)
@@ -116,8 +116,6 @@ Framebuffer CMA Helper Functions Reference
 .. kernel-doc:: drivers/gpu/drm/drm_fb_cma_helper.c
    :export:
 
-.. _drm_bridges:
-
 Framebuffer GEM Helper Reference
 ================================
 
@@ -127,6 +125,8 @@ Framebuffer GEM Helper Reference
 .. kernel-doc:: drivers/gpu/drm/drm_gem_framebuffer_helper.c
    :export:
 
+.. _drm_bridges:
+
 Bridges
 =======
 
@@ -208,18 +208,40 @@ Display Port Dual Mode Adaptor Helper Functions Reference
 .. kernel-doc:: drivers/gpu/drm/drm_dp_dual_mode_helper.c
    :export:
 
-Display Port MST Helper Functions Reference
-===========================================
+Display Port MST Helpers
+========================
+
+Overview
+--------
 
 .. kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c
    :doc: dp mst helper
 
+.. kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c
+   :doc: Branch device and port refcounting
+
+Functions Reference
+-------------------
+
 .. kernel-doc:: include/drm/drm_dp_mst_helper.h
    :internal:
 
 .. kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c
    :export:
 
+Topology Lifetime Internals
+---------------------------
+
+These functions aren't exported to drivers, but are documented here to help make
+the MST topology helpers easier to understand.
+
+.. kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c
+   :functions: drm_dp_mst_topology_try_get_mstb drm_dp_mst_topology_get_mstb
+               drm_dp_mst_topology_put_mstb
+               drm_dp_mst_topology_try_get_port drm_dp_mst_topology_get_port
+               drm_dp_mst_topology_put_port
+               drm_dp_mst_get_mstb_malloc drm_dp_mst_put_mstb_malloc
+
 MIPI DSI Helper Functions Reference
 ===================================
 
@@ -274,18 +296,6 @@ SCDC Helper Functions Reference
 .. kernel-doc:: drivers/gpu/drm/drm_scdc_helper.c
    :export:
 
-Rectangle Utilities Reference
-=============================
-
-.. kernel-doc:: include/drm/drm_rect.h
-   :doc: rect utils
-
-.. kernel-doc:: include/drm/drm_rect.h
-   :internal:
-
-.. kernel-doc:: drivers/gpu/drm/drm_rect.c
-   :export:
-
 HDMI Infoframes Helper Reference
 ================================
 
@@ -300,6 +310,18 @@ libraries and hence is also included here.
 .. kernel-doc:: drivers/video/hdmi.c
    :export:
 
+Rectangle Utilities Reference
+=============================
+
+.. kernel-doc:: include/drm/drm_rect.h
+   :doc: rect utils
+
+.. kernel-doc:: include/drm/drm_rect.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_rect.c
+   :export:
+
 Flip-work Helper Reference
 ==========================
 
index 75c882e09feec13081b34381ad2e2d1def67e70c..23a3c986ef6d4abed2b4987df09c4183e034e0d8 100644 (file)
@@ -410,102 +410,6 @@ Encoder Functions Reference
 .. kernel-doc:: drivers/gpu/drm/drm_encoder.c
    :export:
 
-KMS Initialization and Cleanup
-==============================
-
-A KMS device is abstracted and exposed as a set of planes, CRTCs,
-encoders and connectors. KMS drivers must thus create and initialize all
-those objects at load time after initializing mode setting.
-
-CRTCs (:c:type:`struct drm_crtc <drm_crtc>`)
---------------------------------------------
-
-A CRTC is an abstraction representing a part of the chip that contains a
-pointer to a scanout buffer. Therefore, the number of CRTCs available
-determines how many independent scanout buffers can be active at any
-given time. The CRTC structure contains several fields to support this:
-a pointer to some video memory (abstracted as a frame buffer object), a
-display mode, and an (x, y) offset into the video memory to support
-panning or configurations where one piece of video memory spans multiple
-CRTCs.
-
-CRTC Initialization
-~~~~~~~~~~~~~~~~~~~
-
-A KMS device must create and register at least one struct
-:c:type:`struct drm_crtc <drm_crtc>` instance. The instance is
-allocated and zeroed by the driver, possibly as part of a larger
-structure, and registered with a call to :c:func:`drm_crtc_init()`
-with a pointer to CRTC functions.
-
-
-Cleanup
--------
-
-The DRM core manages its objects' lifetime. When an object is not needed
-anymore the core calls its destroy function, which must clean up and
-free every resource allocated for the object. Every
-:c:func:`drm_\*_init()` call must be matched with a corresponding
-:c:func:`drm_\*_cleanup()` call to cleanup CRTCs
-(:c:func:`drm_crtc_cleanup()`), planes
-(:c:func:`drm_plane_cleanup()`), encoders
-(:c:func:`drm_encoder_cleanup()`) and connectors
-(:c:func:`drm_connector_cleanup()`). Furthermore, connectors that
-have been added to sysfs must be removed by a call to
-:c:func:`drm_connector_unregister()` before calling
-:c:func:`drm_connector_cleanup()`.
-
-Connectors state change detection must be cleanup up with a call to
-:c:func:`drm_kms_helper_poll_fini()`.
-
-Output discovery and initialization example
--------------------------------------------
-
-.. code-block:: c
-
-    void intel_crt_init(struct drm_device *dev)
-    {
-        struct drm_connector *connector;
-        struct intel_output *intel_output;
-
-        intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
-        if (!intel_output)
-            return;
-
-        connector = &intel_output->base;
-        drm_connector_init(dev, &intel_output->base,
-                   &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
-
-        drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs,
-                 DRM_MODE_ENCODER_DAC);
-
-        drm_connector_attach_encoder(&intel_output->base,
-                          &intel_output->enc);
-
-        /* Set up the DDC bus. */
-        intel_output->ddc_bus = intel_i2c_create(dev, GPIOA, "CRTDDC_A");
-        if (!intel_output->ddc_bus) {
-            dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
-                   "failed.\n");
-            return;
-        }
-
-        intel_output->type = INTEL_OUTPUT_ANALOG;
-        connector->interlace_allowed = 0;
-        connector->doublescan_allowed = 0;
-
-        drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs);
-        drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
-
-        drm_connector_register(connector);
-    }
-
-In the example above (taken from the i915 driver), a CRTC, connector and
-encoder combination is created. A device-specific i2c bus is also
-created for fetching EDID data and performing monitor detection. Once
-the process is complete, the new connector is registered with sysfs to
-make its properties available to applications.
-
 KMS Locking
 ===========
 
diff --git a/Documentation/gpu/komeda-kms.rst b/Documentation/gpu/komeda-kms.rst
new file mode 100644 (file)
index 0000000..b08da1c
--- /dev/null
@@ -0,0 +1,488 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==============================
+ drm/komeda Arm display driver
+==============================
+
+The drm/komeda driver supports the Arm display processor D71 and later products.
+This document gives a brief overview of the driver design: how it works and why
+it is designed this way.
+
+Overview of D71 like display IPs
+================================
+
+Starting with D71, Arm display IP adopts a flexible and modularized
+architecture. A display pipeline is made up of multiple individual and
+functional pipeline stages called components, and every component has
+specific capabilities that apply a particular processing step to the pixel
+data flowing through the pipeline.
+
+Typical D71 components:
+
+Layer
+-----
+Layer is the first pipeline stage. It prepares the pixel data for the next
+stage: it fetches pixels from memory, decodes them if they are AFBC
+compressed, rotates the source image, unpacks or converts YUV pixels to the
+device internal RGB pixels, then adjusts the color_space of the pixels if
+needed.
+
+Scaler
+------
+As its name suggests, the scaler is responsible for scaling, and D71 also
+supports image enhancement through the scaler.
+The usage of the scaler is very flexible: it can be connected to a layer
+output for layer scaling, or connected to the compositor to scale the whole
+display frame and feed the output data into the wb_layer, which then writes
+it to memory.
+
+Compositor (compiz)
+-------------------
+Compositor blends multiple layers or pixel data flows into one single display
+frame. Its output frame can be fed into the post image processor for display
+on the monitor, or fed into the wb_layer and written to memory at the same
+time. The user can also insert a scaler between the compositor and the
+wb_layer to downscale the display frame first and then write it to memory.
+
+Writeback Layer (wb_layer)
+--------------------------
+Writeback layer does the opposite of Layer: it connects to compiz and writes
+the composition result back to memory.
+
+Post image processor (improc)
+-----------------------------
+Post image processor adjusts frame data like gamma and color space to fit the
+requirements of the monitor.
+
+Timing controller (timing_ctrlr)
+--------------------------------
+The final stage of the display pipeline. The timing controller does not
+handle pixels; it only controls the display timing.
+
+Merger
+------
+The D71 scaler mostly has only half the horizontal input/output capability of
+Layer: if Layer supports a 4K input size, the scaler can only support 2K
+input/output at the same time. To achieve full frame scaling, D71 introduces
+Layer Split, which splits the whole image into two halves and feeds them to
+two Layers, A and B, which do the scaling independently. After scaling, the
+results need to be fed to the merger, which merges the two partial images
+together and outputs the merged result to compiz.
+
+Splitter
+--------
+Similar to Layer Split, but Splitter is used for writeback: it splits the
+compiz result into two parts and then feeds them to two scalers.
+
+Possible D71 Pipeline usage
+===========================
+
+Benefitting from the modularized architecture, D71 pipelines can be easily
+adjusted to fit different usages. D71 has two pipelines, which support two
+working modes:
+
+-   Dual display mode
+    Two pipelines work independently and separately to drive two display outputs.
+
+-   Single display mode
+    Two pipelines work together to drive only one display output.
+
+    In this mode, pipeline_B doesn't work independently, but outputs its
+    composition result into pipeline_A, and its pixel timing is also derived
+    from pipeline_A.timing_ctrlr. Pipeline_B works just like a "slave" of
+    pipeline_A (the master).
+
+Single pipeline data flow
+-------------------------
+
+.. kernel-render:: DOT
+   :alt: Single pipeline digraph
+   :caption: Single pipeline data flow
+
+   digraph single_ppl {
+      rankdir=LR;
+
+      subgraph {
+         "Memory";
+         "Monitor";
+      }
+
+      subgraph cluster_pipeline {
+          style=dashed
+          node [shape=box]
+          {
+              node [bgcolor=grey style=dashed]
+              "Scaler-0";
+              "Scaler-1";
+              "Scaler-0/1"
+          }
+
+         node [bgcolor=grey style=filled]
+         "Layer-0" -> "Scaler-0"
+         "Layer-1" -> "Scaler-0"
+         "Layer-2" -> "Scaler-1"
+         "Layer-3" -> "Scaler-1"
+
+         "Layer-0" -> "Compiz"
+         "Layer-1" -> "Compiz"
+         "Layer-2" -> "Compiz"
+         "Layer-3" -> "Compiz"
+         "Scaler-0" -> "Compiz"
+         "Scaler-1" -> "Compiz"
+
+         "Compiz" -> "Scaler-0/1" -> "Wb_layer"
+         "Compiz" -> "Improc" -> "Timing Controller"
+      }
+
+      "Wb_layer" -> "Memory"
+      "Timing Controller" -> "Monitor"
+   }
+
+Dual pipeline with Slave enabled
+--------------------------------
+
+.. kernel-render:: DOT
+   :alt: Slave pipeline digraph
+   :caption: Slave pipeline enabled data flow
+
+   digraph slave_ppl {
+      rankdir=LR;
+
+      subgraph {
+         "Memory";
+         "Monitor";
+      }
+      node [shape=box]
+      subgraph cluster_pipeline_slave {
+          style=dashed
+          label="Slave Pipeline_B"
+          node [shape=box]
+          {
+              node [bgcolor=grey style=dashed]
+              "Slave.Scaler-0";
+              "Slave.Scaler-1";
+          }
+
+         node [bgcolor=grey style=filled]
+         "Slave.Layer-0" -> "Slave.Scaler-0"
+         "Slave.Layer-1" -> "Slave.Scaler-0"
+         "Slave.Layer-2" -> "Slave.Scaler-1"
+         "Slave.Layer-3" -> "Slave.Scaler-1"
+
+         "Slave.Layer-0" -> "Slave.Compiz"
+         "Slave.Layer-1" -> "Slave.Compiz"
+         "Slave.Layer-2" -> "Slave.Compiz"
+         "Slave.Layer-3" -> "Slave.Compiz"
+         "Slave.Scaler-0" -> "Slave.Compiz"
+         "Slave.Scaler-1" -> "Slave.Compiz"
+      }
+
+      subgraph cluster_pipeline_master {
+          style=dashed
+          label="Master Pipeline_A"
+          node [shape=box]
+          {
+              node [bgcolor=grey style=dashed]
+              "Scaler-0";
+              "Scaler-1";
+              "Scaler-0/1"
+          }
+
+         node [bgcolor=grey style=filled]
+         "Layer-0" -> "Scaler-0"
+         "Layer-1" -> "Scaler-0"
+         "Layer-2" -> "Scaler-1"
+         "Layer-3" -> "Scaler-1"
+
+         "Slave.Compiz" -> "Compiz"
+         "Layer-0" -> "Compiz"
+         "Layer-1" -> "Compiz"
+         "Layer-2" -> "Compiz"
+         "Layer-3" -> "Compiz"
+         "Scaler-0" -> "Compiz"
+         "Scaler-1" -> "Compiz"
+
+         "Compiz" -> "Scaler-0/1" -> "Wb_layer"
+         "Compiz" -> "Improc" -> "Timing Controller"
+      }
+
+      "Wb_layer" -> "Memory"
+      "Timing Controller" -> "Monitor"
+   }
+
+Sub-pipelines for input and output
+----------------------------------
+
+A complete display pipeline can be easily divided into three sub-pipelines
+according to the in/out usage.
+
+Layer(input) pipeline
+~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-render:: DOT
+   :alt: Layer data digraph
+   :caption: Layer (input) data flow
+
+   digraph layer_data_flow {
+      rankdir=LR;
+      node [shape=box]
+
+      {
+         node [bgcolor=grey style=dashed]
+           "Scaler-n";
+      }
+
+      "Layer-n" -> "Scaler-n" -> "Compiz"
+   }
+
+.. kernel-render:: DOT
+   :alt: Layer Split digraph
+   :caption: Layer Split pipeline
+
+   digraph layer_data_flow {
+      rankdir=LR;
+      node [shape=box]
+
+      "Layer-0/1" -> "Scaler-0" -> "Merger"
+      "Layer-2/3" -> "Scaler-1" -> "Merger"
+      "Merger" -> "Compiz"
+   }
+
+Writeback(output) pipeline
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. kernel-render:: DOT
+   :alt: writeback digraph
+   :caption: Writeback(output) data flow
+
+   digraph writeback_data_flow {
+      rankdir=LR;
+      node [shape=box]
+
+      {
+         node [bgcolor=grey style=dashed]
+           "Scaler-n";
+      }
+
+      "Compiz" -> "Scaler-n" -> "Wb_layer"
+   }
+
+.. kernel-render:: DOT
+   :alt: split writeback digraph
+   :caption: Writeback(output) Split data flow
+
+   digraph writeback_data_flow {
+      rankdir=LR;
+      node [shape=box]
+
+      "Compiz" -> "Splitter"
+      "Splitter" -> "Scaler-0" -> "Merger"
+      "Splitter" -> "Scaler-1" -> "Merger"
+      "Merger" -> "Wb_layer"
+   }
+
+Display output pipeline
+~~~~~~~~~~~~~~~~~~~~~~~
+.. kernel-render:: DOT
+   :alt: display digraph
+   :caption: display output data flow
+
+   digraph single_ppl {
+      rankdir=LR;
+      node [shape=box]
+
+      "Compiz" -> "Improc" -> "Timing Controller"
+   }
+
+In the following sections we'll see that these three sub-pipelines are handled
+by the KMS plane/wb_conn/crtc respectively.
+
+Komeda Resource abstraction
+===========================
+
+struct komeda_pipeline/component
+--------------------------------
+
+To fully utilize and easily access/configure the HW, the driver side also uses
+a similar architecture: Pipeline/Component to describe the HW features and
+capabilities. A specific component consists of two parts:
+
+-  Data flow control.
+-  Specific component capabilities and features.
+
+So the driver defines a common header, struct komeda_component, to describe
+the data flow control, and all specific components are subclasses of this base
+structure.
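+
+A sketch of this subclassing pattern (the member names here are illustrative;
+see the kernel-doc below for the real definitions):
+
+.. code-block:: c
+
+    struct komeda_component {
+        u32 __iomem *reg;       /* data flow control registers */
+        /* ... */
+    };
+
+    struct komeda_scaler {
+        struct komeda_component base;   /* common data flow control part */
+        /* ... scaler specific capabilities ... */
+    };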
+
+.. kernel-doc:: drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
+   :internal:
+
+Resource discovery and initialization
+=====================================
+
+Pipeline and component are used to describe how to handle the pixel data. We
+still need a struct komeda_dev to describe the whole view of the device and
+the control abilities of the device.
+
+We have &komeda_dev, &komeda_pipeline, &komeda_component. Now we fill devices
+with pipelines. Since komeda is not only for D71 but also intended for later
+products, we'd better share as much as possible between different products. To
+achieve this, the komeda device is split into two layers: CORE and CHIP.
+
+-   CORE: for common features and capabilities handling.
+-   CHIP: for register programming and HW specific feature (limitation) handling.
+
+CORE can access CHIP through three chip function structures (sketched below):
+
+-   struct komeda_dev_funcs
+-   struct komeda_pipeline_funcs
+-   struct komeda_component_funcs
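+
+A minimal sketch of how CORE calls into CHIP through such a function table
+(the enum_resources callback appears in this series; the surrounding code is
+illustrative):
+
+.. code-block:: c
+
+    struct komeda_dev_funcs {
+        /* enumerate the chip's pipelines and components */
+        int (*enum_resources)(struct komeda_dev *mdev);
+        /* ... */
+    };
+
+    /* CORE stays chip agnostic and only calls through the table */
+    err = mdev->funcs->enum_resources(mdev);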
+
+.. kernel-doc:: drivers/gpu/drm/arm/display/komeda/komeda_dev.h
+   :internal:
+
+Format handling
+===============
+
+.. kernel-doc:: drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h
+   :internal:
+.. kernel-doc:: drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h
+   :internal:
+
+Attach komeda_dev to DRM-KMS
+============================
+
+Komeda abstracts resources by pipeline/component, but DRM-KMS uses
+crtc/plane/connector. One KMS object cannot be represented by one single
+component, since the requirements of a single KMS object cannot simply be
+met by a single component; usually multiple components are needed to fit the
+requirement. For example, setting the mode, gamma and CTM for KMS all target
+the CRTC object, but komeda needs compiz, improc and timing_ctrlr to work
+together to meet these requirements. And a KMS plane may require multiple
+komeda resources: layer/scaler/compiz.
+
+So, one KMS-Obj represents a sub-pipeline of komeda resources.
+
+-   Plane: `Layer(input) pipeline`_
+-   Wb_connector: `Writeback(output) pipeline`_
+-   Crtc: `Display output pipeline`_
+
+So, for komeda, we treat KMS crtc/plane/connector as users of pipeline and
+component, and at any one time a pipeline/component can only be used by one
+user. Pipeline/component are treated as private objects of DRM-KMS, and their
+state is managed by drm_atomic_state as well.
+
+How to map plane to Layer(input) pipeline
+-----------------------------------------
+
+Komeda has multiple Layer input pipelines, see:
+
+-   `Single pipeline data flow`_
+-   `Dual pipeline with Slave enabled`_
+
+The easiest way would be to bind a plane to a fixed Layer pipeline, but
+consider the following komeda capabilities:
+
+-   Layer Split, See `Layer(input) pipeline`_
+
+    Layer Split is a quite complicated feature: it splits a big image into two
+    parts and handles them with two layers and two scalers individually. But it
+    introduces an edge effect in the middle of the image after the split.
+    Avoiding such a problem requires a complicated split calculation and some
+    special configuration of the layer and scaler. We'd better hide such HW
+    related complexity from user mode.
+
+-   Slave pipeline, See `Dual pipeline with Slave enabled`_
+
+    Since the compiz component doesn't output an alpha value, the slave
+    pipeline can only be used to compose the bottom layers. The komeda driver
+    wants to hide this limitation from the user. The way to do this is to pick
+    a suitable Layer according to plane_state->zpos.
+
+So for komeda, a KMS plane doesn't represent a fixed komeda layer pipeline,
+but multiple Layers with the same capabilities. Komeda will select one or more
+Layers to fit the requirements of one KMS plane.
+
+Make component/pipeline a drm_private_obj
+-----------------------------------------
+
+Add a :c:type:`drm_private_obj` to :c:type:`komeda_component` and
+:c:type:`komeda_pipeline`:
+
+.. code-block:: c
+
+    struct komeda_component {
+        struct drm_private_obj obj;
+        ...
+    }
+
+    struct komeda_pipeline {
+        struct drm_private_obj obj;
+        ...
+    }
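+
+Each embedded :c:type:`drm_private_obj` is then registered with the DRM core
+via drm_atomic_private_obj_init(), roughly like this (a minimal sketch; the
+funcs name is illustrative):
+
+.. code-block:: c
+
+    drm_atomic_private_obj_init(drm_dev, &component->obj,
+                                &initial_state->obj,
+                                &komeda_component_obj_funcs);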
+
+Tracking component_state/pipeline_state by drm_atomic_state
+-----------------------------------------------------------
+
+Add a :c:type:`drm_private_state` and a user to :c:type:`komeda_component_state`
+and :c:type:`komeda_pipeline_state`:
+
+.. code-block:: c
+
+    struct komeda_component_state {
+        struct drm_private_state obj;
+        void *binding_user;
+        ...
+    }
+
+    struct komeda_pipeline_state {
+        struct drm_private_state obj;
+        struct drm_crtc *crtc;
+        ...
+    }
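+
+An atomic check can then look up the komeda state through the standard
+private object helpers (a sketch):
+
+.. code-block:: c
+
+    struct drm_private_state *priv_st;
+    struct komeda_component_state *st;
+
+    priv_st = drm_atomic_get_private_obj_state(state, &component->obj);
+    if (IS_ERR(priv_st))
+        return PTR_ERR(priv_st);
+
+    st = container_of(priv_st, struct komeda_component_state, obj);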
+
+komeda component validation
+---------------------------
+
+Komeda has multiple types of components, but the process of validation is
+similar for all of them, usually including the following steps:
+
+.. code-block:: c
+
+    int komeda_xxxx_validate(struct komeda_component_xxx *xxx_comp,
+                struct komeda_component_output *input_dflow,
+                struct drm_plane/crtc/connector *user,
+                struct drm_plane/crtc/connector_state *user_state)
+    {
+         Step 1: Check if the component is needed; the scaler, for example,
+                 is optional depending on the user_state. If it is not needed,
+                 just return, and the caller will put the data flow into the
+                 next stage.
+         Step 2: Check the user_state against the component features and
+                 capabilities to see if the requirements can be met; if not,
+                 return failure.
+         Step 3: Get the component_state from the drm_atomic_state, and try to
+                 set the user on the component; fail if the component has
+                 already been assigned to another user.
+         Step 4: Configure the component_state: set its input component and
+                 convert the user_state to a component specific state.
+         Step 5: Adjust the input_dflow and prepare it for the next stage.
+    }
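+
+These validate calls are chained along the data flow; a sketch of how a plane
+check might walk its input sub-pipeline (the function names are illustrative):
+
+.. code-block:: c
+
+    struct komeda_component_output dflow;
+    int err;
+
+    err = komeda_layer_validate(layer, &dflow, plane, plane_st);
+    if (!err && needs_scaling)
+        err = komeda_scaler_validate(scaler, &dflow, plane, plane_st);
+    if (!err)
+        err = komeda_compiz_set_input(compiz, &dflow, plane, plane_st);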
+
+komeda_kms Abstraction
+----------------------
+
+.. kernel-doc:: drivers/gpu/drm/arm/display/komeda/komeda_kms.h
+   :internal:
+
+komeda_kms Functions
+--------------------
+.. kernel-doc:: drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+   :internal:
+.. kernel-doc:: drivers/gpu/drm/arm/display/komeda/komeda_plane.c
+   :internal:
+
+Build komeda to be a Linux module driver
+========================================
+
+Now we have two levels of devices:
+
+-   komeda_dev: describes the real display hardware.
+-   komeda_kms_dev: attaches or connects komeda_dev to DRM-KMS.
+
+All komeda operations are supplied or performed by komeda_dev or
+komeda_kms_dev; the module driver is only a simple wrapper that passes the
+Linux commands (probe/remove/pm) on to komeda_dev or komeda_kms_dev.
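+
+A sketch of that wrapper (illustrative only; the real entry points live in
+komeda_drv.c):
+
+.. code-block:: c
+
+    static int komeda_platform_probe(struct platform_device *pdev)
+    {
+        struct komeda_dev *mdev;
+
+        mdev = komeda_dev_create(&pdev->dev);   /* probe the display HW */
+        if (IS_ERR(mdev))
+            return PTR_ERR(mdev);
+
+        return komeda_kms_setup(mdev);          /* attach it to DRM-KMS */
+    }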
index 41da7b06195c8562f0a4544ac309b08f90300ec1..cda4a37a02f066bbdb5913c2ac553ecc7c71559a 100644 (file)
@@ -82,30 +82,6 @@ events for atomic commits correctly. But fixing these bugs is good anyway.
 
 Contact: Daniel Vetter, respective driver maintainers
 
-Better manual-upload support for atomic
----------------------------------------
-
-This would be especially useful for tinydrm:
-
-- Add a struct drm_rect dirty_clip to drm_crtc_state. When duplicating the
-  crtc state, clear that to the max values, x/y = 0 and w/h = MAX_INT, in
-  __drm_atomic_helper_crtc_duplicate_state().
-
-- Move tinydrm_merge_clips into drm_framebuffer.c, dropping the tinydrm\_
-  prefix ofc and using drm_fb\_. drm_framebuffer.c makes sense since this
-  is a function useful to implement the fb->dirty function.
-
-- Create a new drm_fb_dirty function which does essentially what e.g.
-  mipi_dbi_fb_dirty does. You can use e.g. drm_atomic_helper_update_plane as the
-  template. But instead of doing a simple full-screen plane update, this new
-  helper also sets crtc_state->dirty_clip to the right coordinates. And of
-  course it needs to check whether the fb is actually active (and maybe where),
-  so there's some book-keeping involved. There's also some good fun involved in
-  scaling things appropriately. For that case we might simply give up and
-  declare the entire area covered by the plane as dirty.
-
-Contact: Noralf Trønnes, Daniel Vetter
-
 Fallout from atomic KMS
 -----------------------
 
@@ -209,6 +185,36 @@ Would be great to refactor this all into a set of small common helpers.
 
 Contact: Daniel Vetter
 
+Generic fbdev defio support
+---------------------------
+
+The defio support code in the fbdev core has some very specific requirements,
+which means drivers need to have a special framebuffer for fbdev. This prevents
+us from using the generic fbdev emulation code everywhere. The main issue is
+that it uses some fields in struct page itself, which breaks shmem gem objects
+(and other things).
+
+A possible solution would be to write our own defio mmap code in the drm fbdev
+emulation. It would need to fully wrap the existing mmap ops, forwarding
+everything after it has done the write-protect/mkwrite trickery:
+
+- In the drm_fbdev_fb_mmap helper, if we need defio, change the
+  default page prots to write-protected with something like this::
+
+      vma->vm_page_prot = pgprot_wrprotect(vma->vm_page_prot);
+
+- Set the mkwrite and fsync callbacks with similar implementations to the core
+  fbdev defio stuff. These should all work on plain ptes, they don't actually
+  require a struct page.
+
+- Track the dirty pages in a separate structure (bitfield with one bit per page
+  should work) to avoid clobbering struct page.
+
+Might be good to also have some igt testcases for this.
+
+Contact: Daniel Vetter, Noralf Tronnes
+
 Put a reservation_object into drm_gem_object
 --------------------------------------------
 
@@ -256,6 +262,44 @@ As a reference, take a look at the conversions already completed in drm core.
 
 Contact: Sean Paul, respective driver maintainers
 
+Rename CMA helpers to DMA helpers
+---------------------------------
+
+CMA (standing for contiguous memory allocator) is really a bit of an accident
+of what these helpers were first used for; a much better name would be DMA
+helpers. In the text they should even be called coherent DMA memory helpers
+(so maybe CDM, but no one knows what that means) since underneath they just
+use dma_alloc_coherent.
+
+Contact: Laurent Pinchart, Daniel Vetter
+
+Convert direct mode.vrefresh accesses to use drm_mode_vrefresh()
+----------------------------------------------------------------
+
+drm_display_mode.vrefresh isn't guaranteed to be populated. As such, using it
+is risky and has been known to cause div-by-zero bugs. Fortunately, drm core
+has a helper, drm_mode_vrefresh(), which will use mode.vrefresh if it's !0 and
+will calculate it from the timings when it's 0.
+
+Use simple search/replace, or (more fun) cocci to replace instances of direct
+vrefresh access with a call to the helper. Check out
+https://lists.freedesktop.org/archives/dri-devel/2019-January/205186.html for
+inspiration.
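+
+A typical conversion looks like this (sketch)::
+
+      /* before: may be 0 if the mode's vrefresh was never filled in */
+      refresh = mode->vrefresh;
+
+      /* after: falls back to calculating the rate from the timings */
+      refresh = drm_mode_vrefresh(mode);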
+
+Once all instances of vrefresh have been converted, remove vrefresh from
+drm_display_mode to avoid future use.
+
+Contact: Sean Paul
+
+Remove drm_display_mode.hsync
+-----------------------------
+
+We have drm_mode_hsync() to calculate this from hsync_start/end. Since drivers
+shouldn't/don't use this member, remove it to avoid any temptation to use it
+in the future. If there is any debug code using drm_display_mode.hsync, convert
+it to use drm_mode_hsync() instead.
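+
+For example (sketch)::
+
+      /* before */
+      DRM_DEBUG_KMS("hsync: %d kHz\n", mode->hsync);
+
+      /* after */
+      DRM_DEBUG_KMS("hsync: %d kHz\n", drm_mode_hsync(mode));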
+
+Contact: Sean Paul
+
 Core refactorings
 =================
 
@@ -429,21 +473,10 @@ those drivers as simple as possible, so lots of room for refactoring:
   one of the ideas for having a shared dsi/dbi helper, abstracting away the
   transport details more.
 
-- tinydrm_gem_cma_prime_import_sg_table should probably go into the cma
-  helpers, as a _vmapped variant (since not every driver needs the vmap).
-  And tinydrm_gem_cma_free_object could the be merged into
-  drm_gem_cma_free_object().
-
-- tinydrm_fb_create we could move into drm_simple_pipe, only need to add
-  the fb_create hook to drm_simple_pipe_funcs, which would again simplify a
-  bunch of things (since it gives you a one-stop vfunc for simple drivers).
-
 - Quick aside: The unregister devm stuff is kinda getting the lifetimes of
   a drm_device wrong. Doesn't matter, since everyone else gets it wrong
   too :-)
 
-- also rework the drm_framebuffer_funcs->dirty hook wire-up, see above.
-
 Contact: Noralf Trønnes, Daniel Vetter
 
 AMD DC Display Driver
index 6b510ef800f6e429a1f221eced2440860f6a9862..e7e81fadff652a2207352f3edf665c460f14a32a 100644 (file)
@@ -1133,13 +1133,26 @@ S:      Supported
 F:     drivers/gpu/drm/arm/hdlcd_*
 F:     Documentation/devicetree/bindings/display/arm,hdlcd.txt
 
+ARM KOMEDA DRM-KMS DRIVER
+M:     James (Qian) Wang <james.qian.wang@arm.com>
+M:     Liviu Dudau <liviu.dudau@arm.com>
+L:     Mali DP Maintainers <malidp@foss.arm.com>
+S:     Supported
+T:     git git://linux-arm.org/linux-ld.git for-upstream/mali-dp
+F:     drivers/gpu/drm/arm/display/include/
+F:     drivers/gpu/drm/arm/display/komeda/
+F:     Documentation/devicetree/bindings/display/arm/arm,komeda.txt
+F:     Documentation/gpu/komeda-kms.rst
+
 ARM MALI-DP DRM DRIVER
 M:     Liviu Dudau <liviu.dudau@arm.com>
 M:     Brian Starkey <brian.starkey@arm.com>
-M:     Mali DP Maintainers <malidp@foss.arm.com>
+L:     Mali DP Maintainers <malidp@foss.arm.com>
 S:     Supported
+T:     git git://linux-arm.org/linux-ld.git for-upstream/mali-dp
 F:     drivers/gpu/drm/arm/
 F:     Documentation/devicetree/bindings/display/arm,malidp.txt
+F:     Documentation/gpu/afbc.rst
 
 ARM MFM AND FLOPPY DRIVERS
 M:     Ian Molton <spyro@f2s.com>
@@ -4894,6 +4907,12 @@ S:       Orphan / Obsolete
 F:     drivers/gpu/drm/sis/
 F:     include/uapi/drm/sis_drm.h
 
+DRM DRIVER FOR SITRONIX ST7701 PANELS
+M:     Jagan Teki <jagan@amarulasolutions.com>
+S:     Maintained
+F:     drivers/gpu/drm/panel/panel-sitronix-st7701.c
+F:     Documentation/devicetree/bindings/display/panel/sitronix,st7701.txt
+
 DRM DRIVER FOR SITRONIX ST7586 PANELS
 M:     David Lechner <david@lechnology.com>
 S:     Maintained
@@ -4910,6 +4929,13 @@ DRM DRIVER FOR TDFX VIDEO CARDS
 S:     Orphan / Obsolete
 F:     drivers/gpu/drm/tdfx/
 
+DRM DRIVER FOR TPO TPG110 PANELS
+M:     Linus Walleij <linus.walleij@linaro.org>
+T:     git git://anongit.freedesktop.org/drm/drm-misc
+S:     Maintained
+F:     drivers/gpu/drm/panel/panel-tpo-tpg110.c
+F:     Documentation/devicetree/bindings/display/panel/tpo,tpg110.txt
+
 DRM DRIVER FOR USB DISPLAYLINK VIDEO ADAPTERS
 M:     Dave Airlie <airlied@redhat.com>
 R:     Sean Paul <sean@poorly.run>
@@ -4918,6 +4944,16 @@ S:       Odd Fixes
 F:     drivers/gpu/drm/udl/
 T:     git git://anongit.freedesktop.org/drm/drm-misc
 
+DRM DRIVER FOR VIRTUAL KERNEL MODESETTING (VKMS)
+M:     Rodrigo Siqueira <rodrigosiqueiramelo@gmail.com>
+R:     Haneen Mohammed <hamohammed.sa@gmail.com>
+R:     Daniel Vetter <daniel@ffwll.ch>
+T:     git git://anongit.freedesktop.org/drm/drm-misc
+S:     Maintained
+L:     dri-devel@lists.freedesktop.org
+F:     drivers/gpu/drm/vkms/
+F:     Documentation/gpu/vkms.rst
+
 DRM DRIVER FOR VMWARE VIRTUAL GPU
 M:     "VMware Graphics" <linux-graphics-maintainer@vmware.com>
 M:     Thomas Hellstrom <thellstrom@vmware.com>
index ca18e0d23df9775e0a9c342d5a22ac9257d6ee3a..c14cfaea92e25683e26970c24576a47ad3f8b927 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <linux/export.h>
 #include <linux/acpi.h>
+#include <linux/mfd/intel_soc_pmic.h>
 #include <linux/regmap.h>
 #include <acpi/acpi_lpat.h>
 #include "intel_pmic.h"
@@ -36,6 +37,8 @@ struct intel_pmic_opregion {
        struct intel_pmic_regs_handler_ctx ctx;
 };
 
+static struct intel_pmic_opregion *intel_pmic_opregion;
+
 static int pmic_get_reg_bit(int address, struct pmic_table *table,
                            int count, int *reg, int *bit)
 {
@@ -304,6 +307,7 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
        }
 
        opregion->data = d;
+       intel_pmic_opregion = opregion;
        return 0;
 
 out_remove_thermal_handler:
@@ -319,3 +323,60 @@ out_error:
        return ret;
 }
 EXPORT_SYMBOL_GPL(intel_pmic_install_opregion_handler);
+
+/**
+ * intel_soc_pmic_exec_mipi_pmic_seq_element - Execute PMIC MIPI sequence
+ * @i2c_address:  I2C client address for the PMIC
+ * @reg_address:  PMIC register address
+ * @value:        New value for the register bits to change
+ * @mask:         Mask indicating which register bits to change
+ *
+ * DSI LCD panels describe an initialization sequence in the i915 VBT (Video
+ * BIOS Tables) using so called MIPI sequences. One possible element in these
+ * sequences is a PMIC specific element of 15 bytes.
+ *
+ * This function executes these PMIC specific elements sending the embedded
+ * commands to the PMIC.
+ *
+ * Return 0 on success, < 0 on failure.
+ */
+int intel_soc_pmic_exec_mipi_pmic_seq_element(u16 i2c_address, u32 reg_address,
+                                             u32 value, u32 mask)
+{
+       struct intel_pmic_opregion_data *d;
+       int ret;
+
+       if (!intel_pmic_opregion) {
+               pr_warn("%s: No PMIC registered\n", __func__);
+               return -ENXIO;
+       }
+
+       d = intel_pmic_opregion->data;
+
+       mutex_lock(&intel_pmic_opregion->lock);
+
+       if (d->exec_mipi_pmic_seq_element) {
+               ret = d->exec_mipi_pmic_seq_element(intel_pmic_opregion->regmap,
+                                                   i2c_address, reg_address,
+                                                   value, mask);
+       } else if (d->pmic_i2c_address) {
+               if (i2c_address == d->pmic_i2c_address) {
+                       ret = regmap_update_bits(intel_pmic_opregion->regmap,
+                                                reg_address, mask, value);
+               } else {
+                       pr_err("%s: Unexpected i2c-addr: 0x%02x (reg-addr 0x%x value 0x%x mask 0x%x)\n",
+                              __func__, i2c_address, reg_address, value, mask);
+                       ret = -ENXIO;
+               }
+       } else {
+               pr_warn("%s: Not implemented\n", __func__);
+               pr_warn("%s: i2c-addr: 0x%x reg-addr 0x%x value 0x%x mask 0x%x\n",
+                       __func__, i2c_address, reg_address, value, mask);
+               ret = -EOPNOTSUPP;
+       }
+
+       mutex_unlock(&intel_pmic_opregion->lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(intel_soc_pmic_exec_mipi_pmic_seq_element);
index 095afc96952ee5acf03b19df763343f121e91ff7..89379476a1df61f255a03195f5663f3d07df5241 100644 (file)
@@ -15,10 +15,14 @@ struct intel_pmic_opregion_data {
        int (*update_aux)(struct regmap *r, int reg, int raw_temp);
        int (*get_policy)(struct regmap *r, int reg, int bit, u64 *value);
        int (*update_policy)(struct regmap *r, int reg, int bit, int enable);
+       int (*exec_mipi_pmic_seq_element)(struct regmap *r, u16 i2c_address,
+                                         u32 reg_address, u32 value, u32 mask);
        struct pmic_table *power_table;
        int power_table_count;
        struct pmic_table *thermal_table;
        int thermal_table_count;
+       /* For generic exec_mipi_pmic_seq_element handling */
+       int pmic_i2c_address;
 };
 
 int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle, struct regmap *regmap, struct intel_pmic_opregion_data *d);
index 078b0448f30a001f90dbc62deb63eafebb78a1ec..7ffd5624b8e15f9734aa3f301e4a3136a8633887 100644 (file)
@@ -231,6 +231,24 @@ static int intel_cht_wc_pmic_update_power(struct regmap *regmap, int reg,
        return regmap_update_bits(regmap, reg, bitmask, on ? 1 : 0);
 }
 
+static int intel_cht_wc_exec_mipi_pmic_seq_element(struct regmap *regmap,
+                                                  u16 i2c_client_address,
+                                                  u32 reg_address,
+                                                  u32 value, u32 mask)
+{
+       u32 address;
+
+       if (i2c_client_address > 0xff || reg_address > 0xff) {
+               pr_warn("%s warning addresses too big client 0x%x reg 0x%x\n",
+                       __func__, i2c_client_address, reg_address);
+               return -ERANGE;
+       }
+
+       address = (i2c_client_address << 8) | reg_address;
+
+       return regmap_update_bits(regmap, address, mask, value);
+}
+
 /*
  * The thermal table and ops are empty, we do not support the Thermal opregion
  * (DPTF) due to lacking documentation.
@@ -238,6 +256,7 @@ static int intel_cht_wc_pmic_update_power(struct regmap *regmap, int reg,
 static struct intel_pmic_opregion_data intel_cht_wc_pmic_opregion_data = {
        .get_power              = intel_cht_wc_pmic_get_power,
        .update_power           = intel_cht_wc_pmic_update_power,
+       .exec_mipi_pmic_seq_element = intel_cht_wc_exec_mipi_pmic_seq_element,
        .power_table            = power_table,
        .power_table_count      = ARRAY_SIZE(power_table),
 };
index 2579675b7082b76e593a095771f50e9c8e07bca9..1b49cbb1e21e85377e7474b7b2f00231e5ed2dce 100644 (file)
@@ -240,6 +240,7 @@ static struct intel_pmic_opregion_data intel_xpower_pmic_opregion_data = {
        .power_table_count = ARRAY_SIZE(power_table),
        .thermal_table = thermal_table,
        .thermal_table_count = ARRAY_SIZE(thermal_table),
+       .pmic_i2c_address = 0x34,
 };
 
 static acpi_status intel_xpower_pmic_gpio_handler(u32 function,
index 4385f00e1d055583df06ae4b7be0e6a6a68adc51..bd943a71756ca81bb8fe38836ef07980a26dd324 100644 (file)
@@ -170,10 +170,6 @@ config DRM_KMS_CMA_HELPER
        bool
        depends on DRM
        select DRM_GEM_CMA_HELPER
-       select DRM_KMS_FB_HELPER
-       select FB_SYS_FILLRECT
-       select FB_SYS_COPYAREA
-       select FB_SYS_IMAGEBLIT
        help
          Choose this if you need the KMS CMA helper functions
 
index ce8d1d38431924f95fa5d2a695d8e71c5479f92e..1ac55c65eac0dc42700aac97c827391527ba4580 100644 (file)
@@ -51,7 +51,7 @@ obj-$(CONFIG_DRM_DEBUG_SELFTEST) += selftests/
 obj-$(CONFIG_DRM)      += drm.o
 obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
 obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o
-obj-$(CONFIG_DRM_ARM)  += arm/
+obj-y                  += arm/
 obj-$(CONFIG_DRM_TTM)  += ttm/
 obj-$(CONFIG_DRM_SCHED)        += scheduler/
 obj-$(CONFIG_DRM_TDFX) += tdfx/
@@ -81,7 +81,7 @@ obj-$(CONFIG_DRM_UDL) += udl/
 obj-$(CONFIG_DRM_AST) += ast/
 obj-$(CONFIG_DRM_ARMADA) += armada/
 obj-$(CONFIG_DRM_ATMEL_HLCDC)  += atmel-hlcdc/
-obj-$(CONFIG_DRM_RCAR_DU) += rcar-du/
+obj-y                  += rcar-du/
 obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
 obj-y                  += omapdrm/
 obj-$(CONFIG_DRM_SUN4I) += sun4i/
index 69ad6ec0a4f340a1a3b34e22bc49108ccb6577b1..bf04c12bd324b37314c86beb042891ba91f4b57b 100644 (file)
@@ -25,8 +25,8 @@
  */
 #include <drm/drmP.h>
 #include <drm/drm_edid.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/amdgpu_drm.h>
 #include "amdgpu.h"
 #include "atom.h"
index fcab1fe9bb68e0a4624feb79f32c8dc1512cecbe..4f8fb4ecde3419fe8449ddfcea859f17242e6919 100644 (file)
@@ -30,8 +30,8 @@
 #include <linux/console.h>
 #include <linux/slab.h>
 #include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/amdgpu_drm.h>
 #include <linux/vgaarb.h>
 #include <linux/vga_switcheroo.h>
@@ -2722,7 +2722,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
        amdgpu_irq_disable_all(adev);
        if (adev->mode_info.mode_config_initialized){
                if (!amdgpu_device_has_dc_support(adev))
-                       drm_crtc_force_disable_all(adev->ddev);
+                       drm_helper_force_disable_all(adev->ddev);
                else
                        drm_atomic_helper_shutdown(adev->ddev);
        }
index 2f0ea380c031b447afd60448dc9a7ba17001e391..7f3aa7b7e1d82d38f11b360ab96ab6004fd9178d 100644 (file)
@@ -32,7 +32,7 @@
 #include <linux/module.h>
 #include <linux/pm_runtime.h>
 #include <linux/vga_switcheroo.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "amdgpu.h"
 #include "amdgpu_irq.h"
@@ -1191,7 +1191,7 @@ amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
 static struct drm_driver kms_driver = {
        .driver_features =
            DRIVER_USE_AGP | DRIVER_ATOMIC |
-           DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
+           DRIVER_GEM |
            DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ,
        .load = amdgpu_driver_load_kms,
        .open = amdgpu_driver_open_kms,
index aadd0fa42e430d02bfb386152f5510fc332183e3..698fd8a2f775719ba53f3683dd0225cb5a825c38 100644 (file)
@@ -38,6 +38,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
 #include <linux/hrtimer.h>
index e9934de1b9cf8127eb2b770e4301d6ee90c98223..dd30f4e61a8cd97c73c06dc756177dbc33a79d52 100644 (file)
@@ -27,6 +27,8 @@
 #include <linux/slab.h>
 #include <asm/unaligned.h>
 
+#include <drm/drm_util.h>
+
 #define ATOM_DEBUG
 
 #include "atom.h"
index 4cfecdce29a3c86dc30e509007ecec3a22eda338..1f0426d2fc2a0149529e19dea0fa8cbbb21fd350 100644 (file)
@@ -1682,7 +1682,7 @@ static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder,
        dce_v10_0_audio_write_sad_regs(encoder);
        dce_v10_0_audio_write_latency_fields(encoder, mode);
 
-       err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
+       err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
        if (err < 0) {
                DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
                return;
index 7c868916d90f83a4493ffb27db4e6286430c75f5..2280b971d758457d7a07214687e6d3d6ecc70f78 100644 (file)
@@ -1724,7 +1724,7 @@ static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder,
        dce_v11_0_audio_write_sad_regs(encoder);
        dce_v11_0_audio_write_latency_fields(encoder, mode);
 
-       err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
+       err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
        if (err < 0) {
                DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
                return;
index 17eaaba3601706ce4f8ed9010c9460a5b074e619..db443ec53d3aeb5db36c09fdbf45f9287658f4d2 100644 (file)
@@ -1423,6 +1423,7 @@ static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+       struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
        struct hdmi_avi_infoframe frame;
        u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
        uint8_t *payload = buffer + 3;
@@ -1430,7 +1431,7 @@ static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
        ssize_t err;
        u32 tmp;
 
-       err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
+       err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
        if (err < 0) {
                DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
                return;
index 8c0576978d36220d305e1b0231764d8b78b2b8ea..13da915991dd4b8bfa0a08236fdc6e485ed0f646 100644 (file)
@@ -1616,7 +1616,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
        dce_v8_0_audio_write_sad_regs(encoder);
        dce_v8_0_audio_write_latency_fields(encoder, mode);
 
-       err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
+       err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
        if (err < 0) {
                DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
                return;
index cae16b6d2344278d1da880b3b082b4721cd422bc..b39766bd28406707a7e374337ae1a483f94729f5 100644 (file)
@@ -29,7 +29,7 @@
 #include <linux/i2c.h>
 
 #include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/amdgpu_drm.h>
 #include <drm/drm_edid.h>
 
index 44c1a02e6452ebe59d7fd7eca6b4eeabb3639e85..f51d52eb52e6d23f269cb3d7fa6745fb677bc002 100644 (file)
@@ -137,6 +137,7 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector)
        drm_encoder_cleanup(&amdgpu_encoder->base);
        kfree(amdgpu_encoder);
        drm_connector_cleanup(connector);
+       drm_dp_mst_put_port_malloc(amdgpu_dm_connector->port);
        kfree(amdgpu_dm_connector);
 }
 
@@ -314,7 +315,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
        amdgpu_dm_connector_funcs_reset(connector);
 
        DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
-                       aconnector, connector->base.id, aconnector->mst_port);
+                aconnector, connector->base.id, aconnector->mst_port);
+
+       drm_dp_mst_get_port_malloc(port);
 
        DRM_DEBUG_KMS(":%d\n", connector->base.id);
 
@@ -330,12 +333,12 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 
        DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
-                               aconnector, connector->base.id, aconnector->mst_port);
+                aconnector, connector->base.id, aconnector->mst_port);
 
-       aconnector->port = NULL;
        if (aconnector->dc_sink) {
                amdgpu_dm_update_freesync_caps(connector, NULL);
-               dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink);
+               dc_link_remove_remote_sink(aconnector->dc_link,
+                                          aconnector->dc_sink);
                dc_sink_release(aconnector->dc_sink);
                aconnector->dc_sink = NULL;
        }
index e8e9eebbae3f0d66b7cef395b17c186c19978247..a114954d6a5b1119ddd4d80f263dcd19c11b2ea0 100644 (file)
@@ -25,7 +25,7 @@
 #include <linux/acpi.h>
 
 #include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/amdgpu_drm.h>
 #include "dm_services.h"
 #include "amdgpu.h"
index 516795342dd2815629e0876031fa47070c9ec12a..d915e8c8769b03425697a8c129723ff02fd75935 100644 (file)
@@ -27,7 +27,7 @@
 #include <linux/acpi.h>
 
 #include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/amdgpu_drm.h>
 #include "dm_services.h"
 #include "amdgpu.h"
index 62f51f70606d73060ea0d0430e305a0ff6b9860f..73e508e00e30c63cbd9c17988eb4b470112987f3 100644 (file)
  */
 
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_device.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_vblank.h>
 #include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <linux/clk.h>
 #include <linux/platform_data/simplefb.h>
 
index 206a76abf77133f0e56ac668041ada8b9ea9859b..c9f78397d345529aee5d347ff8eb729edba4ec86 100644 (file)
  */
 
 #include <linux/clk.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
 #include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
 
 #include "arcpgu.h"
 #include "arcpgu_regs.h"
index 68629e6149909fd08d52f22e314296971a3d4618..5ea053cf805c7437482e935d1779aa7e80ca55c6 100644 (file)
@@ -14,8 +14,9 @@
  *
  */
 
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_device.h>
+#include <drm/drm_probe_helper.h>
 
 #include "arcpgu.h"
 
@@ -51,7 +52,6 @@ arcpgu_drm_connector_helper_funcs = {
 };
 
 static const struct drm_connector_funcs arcpgu_drm_connector_funcs = {
-       .dpms = drm_helper_connector_dpms,
        .reset = drm_atomic_helper_connector_reset,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = arcpgu_drm_connector_destroy,
index 9a18e1bd57b427f7362366ffe9af64d02b21d63c..a204103b3efbf5a235b35aac98831aedab762267 100644 (file)
@@ -1,13 +1,10 @@
-config DRM_ARM
-       bool
-       help
-         Choose this option to select drivers for ARM's devices
+# SPDX-License-Identifier: GPL-2.0
+menu "ARM devices"
 
 config DRM_HDLCD
        tristate "ARM HDLCD"
        depends on DRM && OF && (ARM || ARM64)
        depends on COMMON_CLK
-       select DRM_ARM
        select DRM_KMS_HELPER
        select DRM_KMS_CMA_HELPER
        help
@@ -29,7 +26,6 @@ config DRM_MALI_DISPLAY
        tristate "ARM Mali Display Processor"
        depends on DRM && OF && (ARM || ARM64)
        depends on COMMON_CLK
-       select DRM_ARM
        select DRM_KMS_HELPER
        select DRM_KMS_CMA_HELPER
        select DRM_GEM_CMA_HELPER
@@ -40,3 +36,7 @@ config DRM_MALI_DISPLAY
          of the hardware.
 
          If compiled as a module it will be called mali-dp.
+
+source "drivers/gpu/drm/arm/display/Kconfig"
+
+endmenu
index 3bf31d1a4722cafa2bb2108e8bad2f616d7aa895..120bef801fcf1ce28bbc3e372a086900d92b74af 100644 (file)
@@ -3,3 +3,4 @@ obj-$(CONFIG_DRM_HDLCD) += hdlcd.o
 mali-dp-y := malidp_drv.o malidp_hw.o malidp_planes.o malidp_crtc.o
 mali-dp-y += malidp_mw.o
 obj-$(CONFIG_DRM_MALI_DISPLAY) += mali-dp.o
+obj-$(CONFIG_DRM_KOMEDA) += display/
diff --git a/drivers/gpu/drm/arm/display/Kbuild b/drivers/gpu/drm/arm/display/Kbuild
new file mode 100644 (file)
index 0000000..382f1ca
--- /dev/null
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_DRM_KOMEDA) += komeda/
diff --git a/drivers/gpu/drm/arm/display/Kconfig b/drivers/gpu/drm/arm/display/Kconfig
new file mode 100644 (file)
index 0000000..cec0639
--- /dev/null
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+config DRM_KOMEDA
+       tristate "ARM Komeda display driver"
+       depends on DRM && OF
+       depends on COMMON_CLK
+       select DRM_KMS_HELPER
+       select DRM_KMS_CMA_HELPER
+       select DRM_GEM_CMA_HELPER
+       select VIDEOMODE_HELPERS
+       help
+         Choose this option if you want to compile the ARM Komeda Display
+         Processor driver. It supports the D71 variants of the hardware.
+
+         If compiled as a module it will be called komeda.
diff --git a/drivers/gpu/drm/arm/display/include/malidp_io.h b/drivers/gpu/drm/arm/display/include/malidp_io.h
new file mode 100644 (file)
index 0000000..4fb3caf
--- /dev/null
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+#ifndef _MALIDP_IO_H_
+#define _MALIDP_IO_H_
+
+#include <linux/io.h>
+
+static inline u32
+malidp_read32(u32 __iomem *base, u32 offset)
+{
+       return readl((base + (offset >> 2)));
+}
+
+static inline void
+malidp_write32(u32 __iomem *base, u32 offset, u32 v)
+{
+       writel(v, (base + (offset >> 2)));
+}
+
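+/* Read-modify-write: clear the bits selected by @m, then OR in @v. */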
+static inline void
+malidp_write32_mask(u32 __iomem *base, u32 offset, u32 m, u32 v)
+{
+       u32 tmp = malidp_read32(base, offset);
+
+       tmp &= (~m);
+       malidp_write32(base, offset, v | tmp);
+}
+
+static inline void
+malidp_write_group(u32 __iomem *base, u32 offset, int num, const u32 *values)
+{
+       int i;
+
+       for (i = 0; i < num; i++)
+               malidp_write32(base, offset + i * 4, values[i]);
+}
+
+#endif /*_MALIDP_IO_H_*/
diff --git a/drivers/gpu/drm/arm/display/include/malidp_product.h b/drivers/gpu/drm/arm/display/include/malidp_product.h
new file mode 100644 (file)
index 0000000..b35fc5d
--- /dev/null
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+#ifndef _MALIDP_PRODUCT_H_
+#define _MALIDP_PRODUCT_H_
+
+/* Product identification */
+#define MALIDP_CORE_ID(__product, __major, __minor, __status) \
+       ((((__product) & 0xFFFF) << 16) | (((__major) & 0xF) << 12) | \
+       (((__minor) & 0xF) << 8) | ((__status) & 0xFF))
+
+#define MALIDP_CORE_ID_PRODUCT_ID(__core_id) ((__u32)(__core_id) >> 16)
+#define MALIDP_CORE_ID_MAJOR(__core_id)      (((__u32)(__core_id) >> 12) & 0xF)
+#define MALIDP_CORE_ID_MINOR(__core_id)      (((__u32)(__core_id) >> 8) & 0xF)
+#define MALIDP_CORE_ID_STATUS(__core_id)     (((__u32)(__core_id)) & 0xFF)
+
+/* Mali-display product IDs */
+#define MALIDP_D71_PRODUCT_ID   0x0071
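+
+/*
+ * Example (illustrative, for a D71 r0p1 core):
+ *
+ *     core_id = MALIDP_CORE_ID(MALIDP_D71_PRODUCT_ID, 0, 1, 0);
+ *     MALIDP_CORE_ID_PRODUCT_ID(core_id) == 0x0071
+ *     MALIDP_CORE_ID_MAJOR(core_id)      == 0
+ *     MALIDP_CORE_ID_MINOR(core_id)      == 1
+ */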
+
+#endif /* _MALIDP_PRODUCT_H_ */
diff --git a/drivers/gpu/drm/arm/display/include/malidp_utils.h b/drivers/gpu/drm/arm/display/include/malidp_utils.h
new file mode 100644 (file)
index 0000000..63cc47c
--- /dev/null
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+#ifndef _MALIDP_UTILS_
+#define _MALIDP_UTILS_
+
+#define has_bit(nr, mask)      (BIT(nr) & (mask))
+#define has_bits(bits, mask)   (((bits) & (mask)) == (bits))
+
+#define dp_for_each_set_bit(bit, mask) \
+       for_each_set_bit((bit), ((unsigned long *)&(mask)), sizeof(mask) * 8)
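+
+/*
+ * Example (illustrative): visit each set bit of a capability mask.
+ *
+ *     u32 mask = layer->supported_inputs;
+ *     unsigned long bit;
+ *
+ *     dp_for_each_set_bit(bit, mask)
+ *             handle_input(bit);
+ */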
+
+#endif /* _MALIDP_UTILS_ */
diff --git a/drivers/gpu/drm/arm/display/komeda/Makefile b/drivers/gpu/drm/arm/display/komeda/Makefile
new file mode 100644 (file)
index 0000000..1b875e5
--- /dev/null
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y := \
+       -I$(src)/../include \
+       -I$(src)
+
+komeda-y := \
+       komeda_drv.o \
+       komeda_dev.o \
+       komeda_format_caps.o \
+       komeda_pipeline.o \
+       komeda_framebuffer.o \
+       komeda_kms.o \
+       komeda_crtc.o \
+       komeda_plane.o \
+       komeda_private_obj.o
+
+komeda-y += \
+       d71/d71_dev.o
+
+obj-$(CONFIG_DRM_KOMEDA) += komeda.o
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
new file mode 100644 (file)
index 0000000..edbf9da
--- /dev/null
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+#include "malidp_io.h"
+#include "komeda_dev.h"
+
+static int d71_enum_resources(struct komeda_dev *mdev)
+{
+       /* TODO add enum resources */
+       return -1;
+}
+
+#define __HW_ID(__group, __format) \
+       ((((__group) & 0x7) << 3) | ((__format) & 0x7))
+
+#define RICH           KOMEDA_FMT_RICH_LAYER
+#define SIMPLE         KOMEDA_FMT_SIMPLE_LAYER
+#define RICH_SIMPLE    (KOMEDA_FMT_RICH_LAYER | KOMEDA_FMT_SIMPLE_LAYER)
+#define RICH_WB                (KOMEDA_FMT_RICH_LAYER | KOMEDA_FMT_WB_LAYER)
+#define RICH_SIMPLE_WB (RICH_SIMPLE | KOMEDA_FMT_WB_LAYER)
+
+#define Rot_0          DRM_MODE_ROTATE_0
+#define Flip_H_V       (DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y | Rot_0)
+#define Rot_ALL_H_V    (DRM_MODE_ROTATE_MASK | Flip_H_V)
+
+#define LYT_NM         BIT(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16)
+#define LYT_WB         BIT(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8)
+#define LYT_NM_WB      (LYT_NM | LYT_WB)
+
+#define AFB_TH         AFBC(_TILED | _SPARSE)
+#define AFB_TH_SC_YTR  AFBC(_TILED | _SC | _SPARSE | _YTR)
+#define AFB_TH_SC_YTR_BS AFBC(_TILED | _SC | _SPARSE | _YTR | _SPLIT)
+
+static struct komeda_format_caps d71_format_caps_table[] = {
+       /*   HW_ID    |        fourcc        | tile_sz |   layer_types |   rots    | afbc_layouts | afbc_features */
+       /* ABGR_2101010*/
+       {__HW_ID(0, 0), DRM_FORMAT_ARGB2101010, 1,      RICH_SIMPLE_WB, Flip_H_V,               0, 0},
+       {__HW_ID(0, 1), DRM_FORMAT_ABGR2101010, 1,      RICH_SIMPLE_WB, Flip_H_V,               0, 0},
+       {__HW_ID(0, 1), DRM_FORMAT_ABGR2101010, 1,      RICH_SIMPLE,    Rot_ALL_H_V,    LYT_NM_WB, AFB_TH_SC_YTR_BS}, /* afbc */
+       {__HW_ID(0, 2), DRM_FORMAT_RGBA1010102, 1,      RICH_SIMPLE_WB, Flip_H_V,               0, 0},
+       {__HW_ID(0, 3), DRM_FORMAT_BGRA1010102, 1,      RICH_SIMPLE_WB, Flip_H_V,               0, 0},
+       /* ABGR_8888*/
+       {__HW_ID(1, 0), DRM_FORMAT_ARGB8888,    1,      RICH_SIMPLE_WB, Flip_H_V,               0, 0},
+       {__HW_ID(1, 1), DRM_FORMAT_ABGR8888,    1,      RICH_SIMPLE_WB, Flip_H_V,               0, 0},
+       {__HW_ID(1, 1), DRM_FORMAT_ABGR8888,    1,      RICH_SIMPLE,    Rot_ALL_H_V,    LYT_NM_WB, AFB_TH_SC_YTR_BS}, /* afbc */
+       {__HW_ID(1, 2), DRM_FORMAT_RGBA8888,    1,      RICH_SIMPLE_WB, Flip_H_V,               0, 0},
+       {__HW_ID(1, 3), DRM_FORMAT_BGRA8888,    1,      RICH_SIMPLE_WB, Flip_H_V,               0, 0},
+       /* XBGB_8888 */
+       {__HW_ID(2, 0), DRM_FORMAT_XRGB8888,    1,      RICH_SIMPLE_WB, Flip_H_V,               0, 0},
+       {__HW_ID(2, 1), DRM_FORMAT_XBGR8888,    1,      RICH_SIMPLE_WB, Flip_H_V,               0, 0},
+       {__HW_ID(2, 2), DRM_FORMAT_RGBX8888,    1,      RICH_SIMPLE_WB, Flip_H_V,               0, 0},
+       {__HW_ID(2, 3), DRM_FORMAT_BGRX8888,    1,      RICH_SIMPLE_WB, Flip_H_V,               0, 0},
+       /* BGR_888 */ /* non-afbc RGB888 doesn't support rotation and flip */
+       {__HW_ID(3, 0), DRM_FORMAT_RGB888,      1,      RICH_SIMPLE_WB, Rot_0,                  0, 0},
+       {__HW_ID(3, 1), DRM_FORMAT_BGR888,      1,      RICH_SIMPLE_WB, Rot_0,                  0, 0},
+       {__HW_ID(3, 1), DRM_FORMAT_BGR888,      1,      RICH_SIMPLE,    Rot_ALL_H_V,    LYT_NM_WB, AFB_TH_SC_YTR_BS}, /* afbc */
+       /* BGR 16bpp */
+       {__HW_ID(4, 0), DRM_FORMAT_RGBA5551,    1,      RICH_SIMPLE,    Flip_H_V,               0, 0},
+       {__HW_ID(4, 1), DRM_FORMAT_ABGR1555,    1,      RICH_SIMPLE,    Flip_H_V,               0, 0},
+       {__HW_ID(4, 1), DRM_FORMAT_ABGR1555,    1,      RICH_SIMPLE,    Rot_ALL_H_V,    LYT_NM_WB, AFB_TH_SC_YTR}, /* afbc */
+       {__HW_ID(4, 2), DRM_FORMAT_RGB565,      1,      RICH_SIMPLE,    Flip_H_V,               0, 0},
+       {__HW_ID(4, 3), DRM_FORMAT_BGR565,      1,      RICH_SIMPLE,    Flip_H_V,               0, 0},
+       {__HW_ID(4, 3), DRM_FORMAT_BGR565,      1,      RICH_SIMPLE,    Rot_ALL_H_V,    LYT_NM_WB, AFB_TH_SC_YTR}, /* afbc */
+       {__HW_ID(4, 4), DRM_FORMAT_R8,          1,      SIMPLE,         Rot_0,                  0, 0},
+       /* YUV 444/422/420 8bit  */
+       {__HW_ID(5, 0), 0 /*XYUV8888*/,         1,      0,              0,                      0, 0},
+       /* XYUV unsupported*/
+       {__HW_ID(5, 1), DRM_FORMAT_YUYV,        1,      RICH,           Rot_ALL_H_V,    LYT_NM, AFB_TH}, /* afbc */
+       {__HW_ID(5, 2), DRM_FORMAT_YUYV,        1,      RICH,           Flip_H_V,               0, 0},
+       {__HW_ID(5, 3), DRM_FORMAT_UYVY,        1,      RICH,           Flip_H_V,               0, 0},
+       {__HW_ID(5, 4), 0 /*X0L0 */,            2,      0,              0,                      0, 0}, /* Y0L0 unsupported */
+       {__HW_ID(5, 6), DRM_FORMAT_NV12,        1,      RICH,           Flip_H_V,               0, 0},
+       {__HW_ID(5, 6), 0/*DRM_FORMAT_YUV420_8BIT*/,    1,      RICH,   Rot_ALL_H_V,    LYT_NM, AFB_TH}, /* afbc */
+       {__HW_ID(5, 7), DRM_FORMAT_YUV420,      1,      RICH,           Flip_H_V,               0, 0},
+       /* YUV 10bit*/
+       {__HW_ID(6, 0), 0 /*XVYU2101010*/,      1,      0,              0,                      0, 0}, /* VYV30 unsupported */
+       {__HW_ID(6, 6), 0/*DRM_FORMAT_X0L2*/,   2,      RICH,           Flip_H_V,               0, 0},
+       {__HW_ID(6, 7), 0/*DRM_FORMAT_P010*/,   1,      RICH,           Flip_H_V,               0, 0},
+       {__HW_ID(6, 7), 0/*DRM_FORMAT_YUV420_10BIT*/, 1,        RICH,   Rot_ALL_H_V,    LYT_NM, AFB_TH},
+};
+
+static void d71_init_fmt_tbl(struct komeda_dev *mdev)
+{
+       struct komeda_format_caps_table *table = &mdev->fmt_tbl;
+
+       table->format_caps = d71_format_caps_table;
+       table->n_formats = ARRAY_SIZE(d71_format_caps_table);
+}
+
+static struct komeda_dev_funcs d71_chip_funcs = {
+       .init_format_table = d71_init_fmt_tbl,
+       .enum_resources = d71_enum_resources,
+       .cleanup        = NULL,
+};
+
+#define GLB_ARCH_ID            0x000
+#define GLB_CORE_ID            0x004
+#define GLB_CORE_INFO          0x008
+
+struct komeda_dev_funcs *
+d71_identify(u32 __iomem *reg_base, struct komeda_chip_info *chip)
+{
+       chip->arch_id   = malidp_read32(reg_base, GLB_ARCH_ID);
+       chip->core_id   = malidp_read32(reg_base, GLB_CORE_ID);
+       chip->core_info = malidp_read32(reg_base, GLB_CORE_INFO);
+
+       return &d71_chip_funcs;
+}
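
Downstream, komeda_dev_create() (later in this patch) feeds the IDs captured here through the malidp_product.h macros. A sketch with a made-up GLB_CORE_ID readback:

static void d71_identify_example(struct komeda_chip_info *chip)
{
	/* suppose GLB_CORE_ID read back as 0x00711000 (hypothetical value) */
	chip->core_id = 0x00711000;

	/* komeda_product_match() compares this 0x0071 against the DT entry */
	WARN_ON(MALIDP_CORE_ID_PRODUCT_ID(chip->core_id) !=
		MALIDP_D71_PRODUCT_ID);

	/* the probe banner would then read "Found ARM Mali-D71 version r1p0" */
}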
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
new file mode 100644 (file)
index 0000000..5bb5a55
--- /dev/null
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+#include <linux/clk.h>
+#include <linux/spinlock.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <linux/pm_runtime.h>
+#include "komeda_dev.h"
+#include "komeda_kms.h"
+
+struct drm_crtc_helper_funcs komeda_crtc_helper_funcs = {
+};
+
+static const struct drm_crtc_funcs komeda_crtc_funcs = {
+};
+
+int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms,
+                          struct komeda_dev *mdev)
+{
+       struct komeda_crtc *crtc;
+       struct komeda_pipeline *master;
+       char str[16];
+       int i;
+
+       kms->n_crtcs = 0;
+
+       for (i = 0; i < mdev->n_pipelines; i++) {
+               crtc = &kms->crtcs[kms->n_crtcs];
+               master = mdev->pipelines[i];
+
+               crtc->master = master;
+               crtc->slave  = NULL;
+
+               if (crtc->slave)
+                       sprintf(str, "pipe-%d", crtc->slave->id);
+               else
+                       sprintf(str, "None");
+
+               DRM_INFO("crtc%d: master(pipe-%d) slave(%s) output: %s.\n",
+                        kms->n_crtcs, master->id, str,
+                        master->of_output_dev ?
+                        master->of_output_dev->full_name : "None");
+
+               kms->n_crtcs++;
+       }
+
+       return 0;
+}
+
+static struct drm_plane *
+get_crtc_primary(struct komeda_kms_dev *kms, struct komeda_crtc *crtc)
+{
+       struct komeda_plane *kplane;
+       struct drm_plane *plane;
+
+       drm_for_each_plane(plane, &kms->base) {
+               if (plane->type != DRM_PLANE_TYPE_PRIMARY)
+                       continue;
+
+               kplane = to_kplane(plane);
+               /* only master can be primary */
+               if (kplane->layer->base.pipeline == crtc->master)
+                       return plane;
+       }
+
+       return NULL;
+}
+
+static int komeda_crtc_add(struct komeda_kms_dev *kms,
+                          struct komeda_crtc *kcrtc)
+{
+       struct drm_crtc *crtc = &kcrtc->base;
+       int err;
+
+       err = drm_crtc_init_with_planes(&kms->base, crtc,
+                                       get_crtc_primary(kms, kcrtc), NULL,
+                                       &komeda_crtc_funcs, NULL);
+       if (err)
+               return err;
+
+       drm_crtc_helper_add(crtc, &komeda_crtc_helper_funcs);
+       drm_crtc_vblank_reset(crtc);
+
+       crtc->port = kcrtc->master->of_output_port;
+
+       return 0;
+}
+
+int komeda_kms_add_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev)
+{
+       int i, err;
+
+       for (i = 0; i < kms->n_crtcs; i++) {
+               err = komeda_crtc_add(kms, &kms->crtcs[i]);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
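
The drm_crtc_funcs and drm_crtc_helper_funcs tables at the top of this file are intentionally empty placeholders. For context, a hedged sketch of what a typical atomic driver fills into drm_crtc_funcs, using the standard DRM helpers; not necessarily what later komeda patches will add:

static const struct drm_crtc_funcs komeda_crtc_funcs_sketch = {
	.reset			= drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
	.destroy		= drm_crtc_cleanup,
	.set_config		= drm_atomic_helper_set_config,
	.page_flip		= drm_atomic_helper_page_flip,
};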
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
new file mode 100644 (file)
index 0000000..0fe6954
--- /dev/null
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include "komeda_dev.h"
+
+static int komeda_parse_pipe_dt(struct komeda_dev *mdev, struct device_node *np)
+{
+       struct komeda_pipeline *pipe;
+       struct clk *clk;
+       u32 pipe_id;
+       int ret = 0;
+
+       ret = of_property_read_u32(np, "reg", &pipe_id);
+       if (ret != 0 || pipe_id >= mdev->n_pipelines)
+               return -EINVAL;
+
+       pipe = mdev->pipelines[pipe_id];
+
+       clk = of_clk_get_by_name(np, "aclk");
+       if (IS_ERR(clk)) {
+               DRM_ERROR("get aclk for pipeline %d failed!\n", pipe_id);
+               return PTR_ERR(clk);
+       }
+       pipe->aclk = clk;
+
+       clk = of_clk_get_by_name(np, "pxclk");
+       if (IS_ERR(clk)) {
+               DRM_ERROR("get pxclk for pipeline %d failed!\n", pipe_id);
+               return PTR_ERR(clk);
+       }
+       pipe->pxlclk = clk;
+
+       /* enum ports */
+       pipe->of_output_dev =
+               of_graph_get_remote_node(np, KOMEDA_OF_PORT_OUTPUT, 0);
+       pipe->of_output_port =
+               of_graph_get_port_by_id(np, KOMEDA_OF_PORT_OUTPUT);
+
+       pipe->of_node = np;
+
+       return 0;
+}
+
+static int komeda_parse_dt(struct device *dev, struct komeda_dev *mdev)
+{
+       struct device_node *child, *np = dev->of_node;
+       struct clk *clk;
+       int ret;
+
+       clk = devm_clk_get(dev, "mclk");
+       if (IS_ERR(clk))
+               return PTR_ERR(clk);
+
+       mdev->mclk = clk;
+
+       for_each_available_child_of_node(np, child) {
+               if (of_node_cmp(child->name, "pipeline") == 0) {
+                       ret = komeda_parse_pipe_dt(mdev, child);
+                       if (ret) {
+                               DRM_ERROR("parse pipeline dt error!\n");
+                               of_node_put(child);
+                               break;
+                       }
+               }
+       }
+
+       return ret;
+}
+
+struct komeda_dev *komeda_dev_create(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       const struct komeda_product_data *product;
+       struct komeda_dev *mdev;
+       struct resource *io_res;
+       int err = 0;
+
+       product = of_device_get_match_data(dev);
+       if (!product)
+               return ERR_PTR(-ENODEV);
+
+       io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!io_res) {
+               DRM_ERROR("No registers defined.\n");
+               return ERR_PTR(-ENODEV);
+       }
+
+       mdev = devm_kzalloc(dev, sizeof(*mdev), GFP_KERNEL);
+       if (!mdev)
+               return ERR_PTR(-ENOMEM);
+
+       mdev->dev = dev;
+       mdev->reg_base = devm_ioremap_resource(dev, io_res);
+       if (IS_ERR(mdev->reg_base)) {
+               DRM_ERROR("Map register space failed.\n");
+               err = PTR_ERR(mdev->reg_base);
+               mdev->reg_base = NULL;
+               goto err_cleanup;
+       }
+
+       mdev->pclk = devm_clk_get(dev, "pclk");
+       if (IS_ERR(mdev->pclk)) {
+               DRM_ERROR("Get APB clk failed.\n");
+               err = PTR_ERR(mdev->pclk);
+               mdev->pclk = NULL;
+               goto err_cleanup;
+       }
+
+       /* Enable APB clock to access the registers */
+       clk_prepare_enable(mdev->pclk);
+
+       mdev->funcs = product->identify(mdev->reg_base, &mdev->chip);
+       if (!komeda_product_match(mdev, product->product_id)) {
+               DRM_ERROR("DT configured %x mismatch with real HW %x.\n",
+                         product->product_id,
+                         MALIDP_CORE_ID_PRODUCT_ID(mdev->chip.core_id));
+               err = -ENODEV;
+               goto err_cleanup;
+       }
+
+       DRM_INFO("Found ARM Mali-D%x version r%dp%d\n",
+                MALIDP_CORE_ID_PRODUCT_ID(mdev->chip.core_id),
+                MALIDP_CORE_ID_MAJOR(mdev->chip.core_id),
+                MALIDP_CORE_ID_MINOR(mdev->chip.core_id));
+
+       mdev->funcs->init_format_table(mdev);
+
+       err = mdev->funcs->enum_resources(mdev);
+       if (err) {
+               DRM_ERROR("enumerate display resource failed.\n");
+               goto err_cleanup;
+       }
+
+       err = komeda_parse_dt(dev, mdev);
+       if (err) {
+               DRM_ERROR("parse device tree failed.\n");
+               goto err_cleanup;
+       }
+
+       return mdev;
+
+err_cleanup:
+       komeda_dev_destroy(mdev);
+       return ERR_PTR(err);
+}
+
+void komeda_dev_destroy(struct komeda_dev *mdev)
+{
+       struct device *dev = mdev->dev;
+       struct komeda_dev_funcs *funcs = mdev->funcs;
+       int i;
+
+       for (i = 0; i < mdev->n_pipelines; i++) {
+               komeda_pipeline_destroy(mdev, mdev->pipelines[i]);
+               mdev->pipelines[i] = NULL;
+       }
+
+       mdev->n_pipelines = 0;
+
+       if (funcs && funcs->cleanup)
+               funcs->cleanup(mdev);
+
+       if (mdev->reg_base) {
+               devm_iounmap(dev, mdev->reg_base);
+               mdev->reg_base = NULL;
+       }
+
+       if (mdev->mclk) {
+               devm_clk_put(dev, mdev->mclk);
+               mdev->mclk = NULL;
+       }
+
+       if (mdev->pclk) {
+               clk_disable_unprepare(mdev->pclk);
+               devm_clk_put(dev, mdev->pclk);
+               mdev->pclk = NULL;
+       }
+
+       devm_kfree(dev, mdev);
+}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
new file mode 100644 (file)
index 0000000..0f77dea
--- /dev/null
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+#ifndef _KOMEDA_DEV_H_
+#define _KOMEDA_DEV_H_
+
+#include <linux/device.h>
+#include <linux/clk.h>
+#include "komeda_pipeline.h"
+#include "malidp_product.h"
+#include "komeda_format_caps.h"
+
+/* malidp device id */
+enum {
+       MALI_D71 = 0,
+};
+
+/* pipeline DT ports */
+enum {
+       KOMEDA_OF_PORT_OUTPUT           = 0,
+       KOMEDA_OF_PORT_COPROC           = 1,
+};
+
+struct komeda_chip_info {
+       u32 arch_id;
+       u32 core_id;
+       u32 core_info;
+       u32 bus_width;
+};
+
+struct komeda_product_data {
+       u32 product_id;
+       struct komeda_dev_funcs *(*identify)(u32 __iomem *reg,
+                                            struct komeda_chip_info *info);
+};
+
+struct komeda_dev;
+
+/**
+ * struct komeda_dev_funcs
+ *
+ * Supplied by the chip level and returned by the chip entry function xxx_identify().
+ */
+struct komeda_dev_funcs {
+       /**
+        * @init_format_table:
+        *
+        * initialize &komeda_dev->format_table, this function should be called
+        * before the &enum_resource
+        */
+       void (*init_format_table)(struct komeda_dev *mdev);
+       /**
+        * @enum_resources:
+        *
+        * for CHIP to report or add pipeline and component resources to CORE
+        */
+       int (*enum_resources)(struct komeda_dev *mdev);
+       /** @cleanup: call to chip to cleanup komeda_dev->chip data */
+       void (*cleanup)(struct komeda_dev *mdev);
+};
+
+/**
+ * struct komeda_dev
+ *
+ * Pipeline and component are used to describe how to handle the pixel data.
+ * komeda_device is for describing the whole view of the device and the
+ * control abilities of the device.
+ */
+struct komeda_dev {
+       struct device *dev;
+       u32 __iomem   *reg_base;
+
+       struct komeda_chip_info chip;
+       /** @fmt_tbl: initialized by &komeda_dev_funcs->init_format_table */
+       struct komeda_format_caps_table fmt_tbl;
+       /** @pclk: APB clock for register access */
+       struct clk *pclk;
+       /** @mclk: HW main engine clk */
+       struct clk *mclk;
+
+       int n_pipelines;
+       struct komeda_pipeline *pipelines[KOMEDA_MAX_PIPELINES];
+
+       /** @funcs: chip funcs to access to HW */
+       struct komeda_dev_funcs *funcs;
+       /**
+        * @chip_data:
+        *
+        * chip data will be added by &komeda_dev_funcs.enum_resources() and
+        * destroyed by &komeda_dev_funcs.cleanup()
+        */
+       void *chip_data;
+};
+
+static inline bool
+komeda_product_match(struct komeda_dev *mdev, u32 target)
+{
+       return MALIDP_CORE_ID_PRODUCT_ID(mdev->chip.core_id) == target;
+}
+
+struct komeda_dev_funcs *
+d71_identify(u32 __iomem *reg, struct komeda_chip_info *chip);
+
+struct komeda_dev *komeda_dev_create(struct device *dev);
+void komeda_dev_destroy(struct komeda_dev *mdev);
+
+#endif /*_KOMEDA_DEV_H_*/
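
To show the vtable pattern this header defines, a hedged sketch of a backend for a hypothetical future chip; all dxx_* names are invented, malidp_io.h is assumed included, and the real D71 backend is in d71/d71_dev.c above:

static void dxx_init_fmt_tbl(struct komeda_dev *mdev)
{
	/* the chip fills mdev->fmt_tbl with its own format caps table */
}

static int dxx_enum_resources(struct komeda_dev *mdev)
{
	/* the chip reports its pipelines/components to the CORE here */
	return 0;
}

static struct komeda_dev_funcs dxx_chip_funcs = {
	.init_format_table = dxx_init_fmt_tbl,
	.enum_resources    = dxx_enum_resources,
	.cleanup           = NULL,
};

struct komeda_dev_funcs *
dxx_identify(u32 __iomem *reg_base, struct komeda_chip_info *chip)
{
	chip->core_id = malidp_read32(reg_base, 0x004); /* chip-specific offset */
	return &dxx_chip_funcs;
}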
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
new file mode 100644 (file)
index 0000000..2bdd189
--- /dev/null
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/component.h>
+#include <drm/drm_of.h>
+#include "komeda_dev.h"
+#include "komeda_kms.h"
+
+struct komeda_drv {
+       struct komeda_dev *mdev;
+       struct komeda_kms_dev *kms;
+};
+
+static void komeda_unbind(struct device *dev)
+{
+       struct komeda_drv *mdrv = dev_get_drvdata(dev);
+
+       if (!mdrv)
+               return;
+
+       komeda_kms_detach(mdrv->kms);
+       komeda_dev_destroy(mdrv->mdev);
+
+       dev_set_drvdata(dev, NULL);
+       devm_kfree(dev, mdrv);
+}
+
+static int komeda_bind(struct device *dev)
+{
+       struct komeda_drv *mdrv;
+       int err;
+
+       mdrv = devm_kzalloc(dev, sizeof(*mdrv), GFP_KERNEL);
+       if (!mdrv)
+               return -ENOMEM;
+
+       mdrv->mdev = komeda_dev_create(dev);
+       if (IS_ERR(mdrv->mdev)) {
+               err = PTR_ERR(mdrv->mdev);
+               goto free_mdrv;
+       }
+
+       mdrv->kms = komeda_kms_attach(mdrv->mdev);
+       if (IS_ERR(mdrv->kms)) {
+               err = PTR_ERR(mdrv->kms);
+               goto destroy_mdev;
+       }
+
+       dev_set_drvdata(dev, mdrv);
+
+       return 0;
+
+destroy_mdev:
+       komeda_dev_destroy(mdrv->mdev);
+
+free_mdrv:
+       devm_kfree(dev, mdrv);
+       return err;
+}
+
+static const struct component_master_ops komeda_master_ops = {
+       .bind   = komeda_bind,
+       .unbind = komeda_unbind,
+};
+
+static int compare_of(struct device *dev, void *data)
+{
+       return dev->of_node == data;
+}
+
+static void komeda_add_slave(struct device *master,
+                            struct component_match **match,
+                            struct device_node *np, int port)
+{
+       struct device_node *remote;
+
+       remote = of_graph_get_remote_node(np, port, 0);
+       if (remote) {
+               drm_of_component_match_add(master, match, compare_of, remote);
+               of_node_put(remote);
+       }
+}
+
+static int komeda_platform_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct component_match *match = NULL;
+       struct device_node *child;
+
+       if (!dev->of_node)
+               return -ENODEV;
+
+       for_each_available_child_of_node(dev->of_node, child) {
+               if (of_node_cmp(child->name, "pipeline") != 0)
+                       continue;
+
+               /* add connector */
+               komeda_add_slave(dev, &match, child, KOMEDA_OF_PORT_OUTPUT);
+       }
+
+       return component_master_add_with_match(dev, &komeda_master_ops, match);
+}
+
+static int komeda_platform_remove(struct platform_device *pdev)
+{
+       component_master_del(&pdev->dev, &komeda_master_ops);
+       return 0;
+}
+
+static const struct komeda_product_data komeda_products[] = {
+       [MALI_D71] = {
+               .product_id = MALIDP_D71_PRODUCT_ID,
+               .identify = d71_identify,
+       },
+};
+
+const struct of_device_id komeda_of_match[] = {
+       { .compatible = "arm,mali-d71", .data = &komeda_products[MALI_D71], },
+       {},
+};
+
+MODULE_DEVICE_TABLE(of, komeda_of_match);
+
+static struct platform_driver komeda_platform_driver = {
+       .probe  = komeda_platform_probe,
+       .remove = komeda_platform_remove,
+       .driver = {
+               .name = "komeda",
+               .of_match_table = komeda_of_match,
+               .pm = NULL,
+       },
+};
+
+module_platform_driver(komeda_platform_driver);
+
+MODULE_AUTHOR("James.Qian.Wang <james.qian.wang@arm.com>");
+MODULE_DESCRIPTION("Komeda KMS driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c
new file mode 100644 (file)
index 0000000..1e17bd6
--- /dev/null
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+
+#include <linux/slab.h>
+#include "komeda_format_caps.h"
+#include "malidp_utils.h"
+
+const struct komeda_format_caps *
+komeda_get_format_caps(struct komeda_format_caps_table *table,
+                      u32 fourcc, u64 modifier)
+{
+       const struct komeda_format_caps *caps;
+       u64 afbc_features = modifier & ~(AFBC_FORMAT_MOD_BLOCK_SIZE_MASK);
+       u32 afbc_layout = modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK;
+       int id;
+
+       for (id = 0; id < table->n_formats; id++) {
+               caps = &table->format_caps[id];
+
+               if (fourcc != caps->fourcc)
+                       continue;
+
+               if ((modifier == 0ULL) && (caps->supported_afbc_layouts == 0))
+                       return caps;
+
+               if (has_bits(afbc_features, caps->supported_afbc_features) &&
+                   has_bit(afbc_layout, caps->supported_afbc_layouts))
+                       return caps;
+       }
+
+       return NULL;
+}
+
+u32 *komeda_get_layer_fourcc_list(struct komeda_format_caps_table *table,
+                                 u32 layer_type, u32 *n_fmts)
+{
+       const struct komeda_format_caps *cap;
+       u32 *fmts;
+       int i, j, n = 0;
+
+       fmts = kcalloc(table->n_formats, sizeof(u32), GFP_KERNEL);
+       if (!fmts)
+               return NULL;
+
+       for (i = 0; i < table->n_formats; i++) {
+               cap = &table->format_caps[i];
+               if (!(layer_type & cap->supported_layer_types) ||
+                   (cap->fourcc == 0))
+                       continue;
+
+               /* one fourcc may have two caps items in the table
+                * (afbc/non-afbc), so check the existing list to avoid
+                * adding a duplicate.
+                */
+               for (j = n - 1; j >= 0; j--)
+                       if (fmts[j] == cap->fourcc)
+                               break;
+
+               if (j < 0)
+                       fmts[n++] = cap->fourcc;
+       }
+
+       if (n_fmts)
+               *n_fmts = n;
+
+       return fmts;
+}
+
+void komeda_put_fourcc_list(u32 *fourcc_list)
+{
+       kfree(fourcc_list);
+}
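
A usage sketch (assuming access to an initialized mdev); note how the same fourcc resolves to different caps rows depending on the modifier, matching the duplicated entries in the D71 table:

static void format_caps_example(struct komeda_dev *mdev)
{
	const struct komeda_format_caps *caps;
	u32 n_fmts, *fmts;

	/* plain linear ABGR8888 hits the non-afbc row */
	caps = komeda_get_format_caps(&mdev->fmt_tbl, DRM_FORMAT_ABGR8888, 0);

	/* the same fourcc with an AFBC modifier hits the afbc row */
	caps = komeda_get_format_caps(&mdev->fmt_tbl, DRM_FORMAT_ABGR8888,
				      AFBC_16x16(_TILED | _SC | _SPARSE | _YTR));
	(void)caps;

	/* fourcc list for a rich layer, e.g. for drm_universal_plane_init() */
	fmts = komeda_get_layer_fourcc_list(&mdev->fmt_tbl,
					    KOMEDA_FMT_RICH_LAYER, &n_fmts);
	komeda_put_fourcc_list(fmts);
}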
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h
new file mode 100644 (file)
index 0000000..60f39e7
--- /dev/null
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+
+#ifndef _KOMEDA_FORMAT_CAPS_H_
+#define _KOMEDA_FORMAT_CAPS_H_
+
+#include <linux/types.h>
+#include <uapi/drm/drm_fourcc.h>
+#include <drm/drm_fourcc.h>
+
+#define AFBC(x)                DRM_FORMAT_MOD_ARM_AFBC(x)
+
+/* afbc layout */
+#define AFBC_16x16(x)  AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 | (x))
+#define AFBC_32x8(x)   AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 | (x))
+
+/* afbc features */
+#define _YTR           AFBC_FORMAT_MOD_YTR
+#define _SPLIT         AFBC_FORMAT_MOD_SPLIT
+#define _SPARSE                AFBC_FORMAT_MOD_SPARSE
+#define _CBR           AFBC_FORMAT_MOD_CBR
+#define _TILED         AFBC_FORMAT_MOD_TILED
+#define _SC            AFBC_FORMAT_MOD_SC
+
+/* layer_type */
+#define KOMEDA_FMT_RICH_LAYER          BIT(0)
+#define KOMEDA_FMT_SIMPLE_LAYER                BIT(1)
+#define KOMEDA_FMT_WB_LAYER            BIT(2)
+
+#define AFBC_TH_LAYOUT_ALIGNMENT       8
+#define AFBC_HEADER_SIZE               16
+#define AFBC_SUPERBLK_ALIGNMENT                128
+#define AFBC_SUPERBLK_PIXELS           256
+#define AFBC_BODY_START_ALIGNMENT      1024
+#define AFBC_TH_BODY_START_ALIGNMENT   4096
+
+/**
+ * struct komeda_format_caps
+ *
+ * komeda_format_caps is for describing ARM display specific features and
+ * limitations for a specific format, and format_caps will be linked into
+ * &komeda_framebuffer like a extension of &drm_format_info.
+ *
+ * NOTE: one fourcc may have two different format_caps items: one for the
+ * fourcc alone and one for fourcc+modifier
+ *
+ * @hw_id: hw format id, hw specific value.
+ * @fourcc: drm fourcc format.
+ * @tile_size: format tiled size, used by ARM format X0L0/X0L2
+ * @supported_layer_types: indicate which layer supports this format
+ * @supported_rots: allowed rotations for this format
+ * @supported_afbc_layouts: supported afbc layouts
+ * @supported_afbc_features: supported afbc features
+ */
+struct komeda_format_caps {
+       u32 hw_id;
+       u32 fourcc;
+       u32 tile_size;
+       u32 supported_layer_types;
+       u32 supported_rots;
+       u32 supported_afbc_layouts;
+       u64 supported_afbc_features;
+};
+
+/**
+ * struct komeda_format_caps_table - format_caps manager
+ *
+ * @n_formats: the size of format_caps list.
+ * @format_caps: format_caps list.
+ */
+struct komeda_format_caps_table {
+       u32 n_formats;
+       const struct komeda_format_caps *format_caps;
+};
+
+const struct komeda_format_caps *
+komeda_get_format_caps(struct komeda_format_caps_table *table,
+                      u32 fourcc, u64 modifier);
+
+u32 *komeda_get_layer_fourcc_list(struct komeda_format_caps_table *table,
+                                 u32 layer_type, u32 *n_fmts);
+
+void komeda_put_fourcc_list(u32 *fourcc_list);
+
+#endif
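
For orientation, a sketch of what the AFBC helpers above expand to; this is the standard DRM modifier encoding from drm_fourcc.h, nothing komeda-specific (feature set chosen arbitrarily):

/* A 16x16-superblock AFBC modifier with tiled/sparse/YTR set... */
u64 example_mod = AFBC_16x16(_TILED | _SPARSE | _YTR);

/* ...is shorthand for: */
u64 example_same = DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
					   AFBC_FORMAT_MOD_TILED |
					   AFBC_FORMAT_MOD_SPARSE |
					   AFBC_FORMAT_MOD_YTR);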
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
new file mode 100644 (file)
index 0000000..23ee74d
--- /dev/null
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include "komeda_framebuffer.h"
+#include "komeda_dev.h"
+
+static void komeda_fb_destroy(struct drm_framebuffer *fb)
+{
+       struct komeda_fb *kfb = to_kfb(fb);
+       u32 i;
+
+       for (i = 0; i < fb->format->num_planes; i++)
+               drm_gem_object_put_unlocked(fb->obj[i]);
+
+       drm_framebuffer_cleanup(fb);
+       kfree(kfb);
+}
+
+static int komeda_fb_create_handle(struct drm_framebuffer *fb,
+                                  struct drm_file *file, u32 *handle)
+{
+       return drm_gem_handle_create(file, fb->obj[0], handle);
+}
+
+static const struct drm_framebuffer_funcs komeda_fb_funcs = {
+       .destroy        = komeda_fb_destroy,
+       .create_handle  = komeda_fb_create_handle,
+};
+
+static int
+komeda_fb_none_afbc_size_check(struct komeda_dev *mdev, struct komeda_fb *kfb,
+                              struct drm_file *file,
+                              const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+       struct drm_framebuffer *fb = &kfb->base;
+       struct drm_gem_object *obj;
+       u32 min_size = 0;
+       u32 i;
+
+       for (i = 0; i < fb->format->num_planes; i++) {
+               obj = drm_gem_object_lookup(file, mode_cmd->handles[i]);
+               if (!obj) {
+                       DRM_DEBUG_KMS("Failed to lookup GEM object\n");
+                       fb->obj[i] = NULL;
+
+                       return -ENOENT;
+               }
+
+               kfb->aligned_w = fb->width / (i ? fb->format->hsub : 1);
+               kfb->aligned_h = fb->height / (i ? fb->format->vsub : 1);
+
+               if (fb->pitches[i] % mdev->chip.bus_width) {
+                       DRM_DEBUG_KMS("Pitch[%d]: 0x%x doesn't align to 0x%x\n",
+                                     i, fb->pitches[i], mdev->chip.bus_width);
+                       drm_gem_object_put_unlocked(obj);
+                       fb->obj[i] = NULL;
+
+                       return -EINVAL;
+               }
+
+               min_size = ((kfb->aligned_h / kfb->format_caps->tile_size - 1)
+                           * fb->pitches[i])
+                           + (kfb->aligned_w * fb->format->cpp[i]
+                              * kfb->format_caps->tile_size)
+                           + fb->offsets[i];
+
+               if (obj->size < min_size) {
+                       DRM_DEBUG_KMS("Fail to check none afbc fb size.\n");
+                       drm_gem_object_put_unlocked(obj);
+                       fb->obj[i] = NULL;
+
+                       return -EINVAL;
+               }
+
+               fb->obj[i] = obj;
+       }
+
+       if (fb->format->num_planes == 3) {
+               if (fb->pitches[1] != fb->pitches[2]) {
+                       DRM_DEBUG_KMS("The pitch[1] and [2] are not same\n");
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+struct drm_framebuffer *
+komeda_fb_create(struct drm_device *dev, struct drm_file *file,
+                const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+       struct komeda_dev *mdev = dev->dev_private;
+       struct komeda_fb *kfb;
+       int ret = 0, i;
+
+       kfb = kzalloc(sizeof(*kfb), GFP_KERNEL);
+       if (!kfb)
+               return ERR_PTR(-ENOMEM);
+
+       kfb->format_caps = komeda_get_format_caps(&mdev->fmt_tbl,
+                                                 mode_cmd->pixel_format,
+                                                 mode_cmd->modifier[0]);
+       if (!kfb->format_caps) {
+               DRM_DEBUG_KMS("FMT %x is not supported.\n",
+                             mode_cmd->pixel_format);
+               kfree(kfb);
+               return ERR_PTR(-EINVAL);
+       }
+
+       drm_helper_mode_fill_fb_struct(dev, &kfb->base, mode_cmd);
+
+       ret = komeda_fb_none_afbc_size_check(mdev, kfb, file, mode_cmd);
+       if (ret < 0)
+               goto err_cleanup;
+
+       ret = drm_framebuffer_init(dev, &kfb->base, &komeda_fb_funcs);
+       if (ret < 0) {
+               DRM_DEBUG_KMS("failed to initialize fb\n");
+
+               goto err_cleanup;
+       }
+
+       return &kfb->base;
+
+err_cleanup:
+       for (i = 0; i < kfb->base.format->num_planes; i++)
+               drm_gem_object_put_unlocked(kfb->base.obj[i]);
+
+       kfree(kfb);
+       return ERR_PTR(ret);
+}
+
+dma_addr_t
+komeda_fb_get_pixel_addr(struct komeda_fb *kfb, int x, int y, int plane)
+{
+       struct drm_framebuffer *fb = &kfb->base;
+       const struct drm_gem_cma_object *obj;
+       u32 plane_x, plane_y, cpp, pitch, offset;
+
+       if (plane >= fb->format->num_planes) {
+               DRM_DEBUG_KMS("Out of max plane num.\n");
+               return -EINVAL;
+       }
+
+       obj = drm_fb_cma_get_gem_obj(fb, plane);
+
+       offset = fb->offsets[plane];
+       if (!fb->modifier) {
+               plane_x = x / (plane ? fb->format->hsub : 1);
+               plane_y = y / (plane ? fb->format->vsub : 1);
+               cpp = fb->format->cpp[plane];
+               pitch = fb->pitches[plane];
+               offset += plane_x * cpp *  kfb->format_caps->tile_size +
+                               (plane_y * pitch) / kfb->format_caps->tile_size;
+       }
+
+       return obj->paddr + offset;
+}
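
A sketch of the intended use when programming a cropped plane source in a chip backend (hedged; assumes a valid, bound plane state):

static void program_layer_addr_example(struct drm_plane_state *st)
{
	struct komeda_fb *kfb = to_kfb(st->fb);
	u32 src_x = st->src_x >> 16;	/* drm src_* are 16.16 fixed point */
	u32 src_y = st->src_y >> 16;
	dma_addr_t addr;

	/* DMA address of the first visible pixel of plane 0 */
	addr = komeda_fb_get_pixel_addr(kfb, src_x, src_y, 0);
	(void)addr;	/* would be written to the layer's address register */
}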
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h
new file mode 100644 (file)
index 0000000..0de2e4a
--- /dev/null
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+#ifndef _KOMEDA_FRAMEBUFFER_H_
+#define _KOMEDA_FRAMEBUFFER_H_
+
+#include <drm/drm_framebuffer.h>
+#include "komeda_format_caps.h"
+
+/** struct komeda_fb - extend drm_framebuffer with komeda attributes */
+struct komeda_fb {
+       /** @base: &drm_framebuffer */
+       struct drm_framebuffer base;
+       /* @format_caps: &komeda_format_caps */
+       const struct komeda_format_caps *format_caps;
+       /** @aligned_w: aligned frame buffer width */
+       u32 aligned_w;
+       /** @aligned_h: aligned frame buffer height */
+       u32 aligned_h;
+};
+
+#define to_kfb(dfb)    container_of(dfb, struct komeda_fb, base)
+
+struct drm_framebuffer *
+komeda_fb_create(struct drm_device *dev, struct drm_file *file,
+                const struct drm_mode_fb_cmd2 *mode_cmd);
+dma_addr_t
+komeda_fb_get_pixel_addr(struct komeda_fb *kfb, int x, int y, int plane);
+bool komeda_fb_is_layer_supported(struct komeda_fb *kfb, u32 layer_type);
+
+#endif
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
new file mode 100644 (file)
index 0000000..3fc096d
--- /dev/null
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+#include <linux/component.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <linux/interrupt.h>
+#include "komeda_dev.h"
+#include "komeda_kms.h"
+#include "komeda_framebuffer.h"
+
+DEFINE_DRM_GEM_CMA_FOPS(komeda_cma_fops);
+
+static int komeda_gem_cma_dumb_create(struct drm_file *file,
+                                     struct drm_device *dev,
+                                     struct drm_mode_create_dumb *args)
+{
+       u32 alignment = 16; /* TODO get alignment from dev */
+
+       args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8),
+                           alignment);
+
+       return drm_gem_cma_dumb_create_internal(file, dev, args);
+}
+
+static struct drm_driver komeda_kms_driver = {
+       .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC |
+                          DRIVER_PRIME,
+       .lastclose                      = drm_fb_helper_lastclose,
+       .gem_free_object_unlocked       = drm_gem_cma_free_object,
+       .gem_vm_ops                     = &drm_gem_cma_vm_ops,
+       .dumb_create                    = komeda_gem_cma_dumb_create,
+       .prime_handle_to_fd             = drm_gem_prime_handle_to_fd,
+       .prime_fd_to_handle             = drm_gem_prime_fd_to_handle,
+       .gem_prime_export               = drm_gem_prime_export,
+       .gem_prime_import               = drm_gem_prime_import,
+       .gem_prime_get_sg_table         = drm_gem_cma_prime_get_sg_table,
+       .gem_prime_import_sg_table      = drm_gem_cma_prime_import_sg_table,
+       .gem_prime_vmap                 = drm_gem_cma_prime_vmap,
+       .gem_prime_vunmap               = drm_gem_cma_prime_vunmap,
+       .gem_prime_mmap                 = drm_gem_cma_prime_mmap,
+       .fops = &komeda_cma_fops,
+       .name = "komeda",
+       .desc = "Arm Komeda Display Processor driver",
+       .date = "20181101",
+       .major = 0,
+       .minor = 1,
+};
+
+static void komeda_kms_commit_tail(struct drm_atomic_state *old_state)
+{
+       struct drm_device *dev = old_state->dev;
+
+       drm_atomic_helper_commit_modeset_disables(dev, old_state);
+
+       drm_atomic_helper_commit_planes(dev, old_state, 0);
+
+       drm_atomic_helper_commit_modeset_enables(dev, old_state);
+
+       drm_atomic_helper_wait_for_flip_done(dev, old_state);
+
+       drm_atomic_helper_commit_hw_done(old_state);
+
+       drm_atomic_helper_cleanup_planes(dev, old_state);
+}
+
+static const struct drm_mode_config_helper_funcs komeda_mode_config_helpers = {
+       .atomic_commit_tail = komeda_kms_commit_tail,
+};
+
+static const struct drm_mode_config_funcs komeda_mode_config_funcs = {
+       .fb_create              = komeda_fb_create,
+       .atomic_check           = drm_atomic_helper_check,
+       .atomic_commit          = drm_atomic_helper_commit,
+};
+
+static void komeda_kms_mode_config_init(struct komeda_kms_dev *kms,
+                                       struct komeda_dev *mdev)
+{
+       struct drm_mode_config *config = &kms->base.mode_config;
+
+       drm_mode_config_init(&kms->base);
+
+       komeda_kms_setup_crtcs(kms, mdev);
+
+       /* Get value from dev */
+       config->min_width       = 0;
+       config->min_height      = 0;
+       config->max_width       = 4096;
+       config->max_height      = 4096;
+       config->allow_fb_modifiers = false;
+
+       config->funcs = &komeda_mode_config_funcs;
+       config->helper_private = &komeda_mode_config_helpers;
+}
+
+struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
+{
+       struct komeda_kms_dev *kms = kzalloc(sizeof(*kms), GFP_KERNEL);
+       struct drm_device *drm;
+       int err;
+
+       if (!kms)
+               return ERR_PTR(-ENOMEM);
+
+       drm = &kms->base;
+       err = drm_dev_init(drm, &komeda_kms_driver, mdev->dev);
+       if (err)
+               goto free_kms;
+
+       drm->dev_private = mdev;
+
+       komeda_kms_mode_config_init(kms, mdev);
+
+       err = komeda_kms_add_private_objs(kms, mdev);
+       if (err)
+               goto cleanup_mode_config;
+
+       err = komeda_kms_add_planes(kms, mdev);
+       if (err)
+               goto cleanup_mode_config;
+
+       err = drm_vblank_init(drm, kms->n_crtcs);
+       if (err)
+               goto cleanup_mode_config;
+
+       err = komeda_kms_add_crtcs(kms, mdev);
+       if (err)
+               goto cleanup_mode_config;
+
+       err = component_bind_all(mdev->dev, kms);
+       if (err)
+               goto cleanup_mode_config;
+
+       drm_mode_config_reset(drm);
+
+       err = drm_dev_register(drm, 0);
+       if (err)
+               goto cleanup_mode_config;
+
+       return kms;
+
+cleanup_mode_config:
+       drm_mode_config_cleanup(drm);
+free_kms:
+       kfree(kms);
+       return ERR_PTR(err);
+}
+
+void komeda_kms_detach(struct komeda_kms_dev *kms)
+{
+       struct drm_device *drm = &kms->base;
+       struct komeda_dev *mdev = drm->dev_private;
+
+       drm_dev_unregister(drm);
+       component_unbind_all(mdev->dev, drm);
+       komeda_kms_cleanup_private_objs(mdev);
+       drm_mode_config_cleanup(drm);
+       drm->dev_private = NULL;
+       drm_dev_put(drm);
+}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
new file mode 100644 (file)
index 0000000..f136660
--- /dev/null
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+#ifndef _KOMEDA_KMS_H_
+#define _KOMEDA_KMS_H_
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_writeback.h>
+
+/** struct komeda_plane - komeda instance of drm_plane */
+struct komeda_plane {
+       /** @base: &drm_plane */
+       struct drm_plane base;
+       /**
+        * @layer:
+        *
+        * represents available layer input pipelines for this plane.
+        *
+        * NOTE:
+        * the layer is not for a specific Layer, but indicates a group of
+        * Layers with the same capabilities.
+        */
+       struct komeda_layer *layer;
+};
+
+/**
+ * struct komeda_plane_state
+ *
+ * The plane_state can be split into two data flows (left/right) and handled
+ * by two layers, &komeda_plane.layer and &komeda_plane.layer.right.
+ */
+struct komeda_plane_state {
+       /** @base: &drm_plane_state */
+       struct drm_plane_state base;
+
+       /* private properties */
+};
+
+/**
+ * struct komeda_wb_connector
+ */
+struct komeda_wb_connector {
+       /** @base: &drm_writeback_connector */
+       struct drm_writeback_connector base;
+
+       /** @wb_layer: represents associated writeback pipeline of komeda */
+       struct komeda_layer *wb_layer;
+};
+
+/**
+ * struct komeda_crtc
+ */
+struct komeda_crtc {
+       /** @base: &drm_crtc */
+       struct drm_crtc base;
+       /** @master: only master has display output */
+       struct komeda_pipeline *master;
+       /**
+        * @slave: optional
+        *
+        * Doesn't have its own display output, the handled data flow will
+        * merge into the master.
+        */
+       struct komeda_pipeline *slave;
+};
+
+/** struct komeda_crtc_state */
+struct komeda_crtc_state {
+       /** @base: &drm_crtc_state */
+       struct drm_crtc_state base;
+
+       /* private properties */
+
+       /* computed state which are used by validate/check */
+       u32 affected_pipes;
+       u32 active_pipes;
+};
+
+/** struct komeda_kms_dev - for gathering KMS-related things */
+struct komeda_kms_dev {
+       /** @base: &drm_device */
+       struct drm_device base;
+
+       /** @n_crtcs: valid number of crtcs in &komeda_kms_dev.crtcs */
+       int n_crtcs;
+       /** @crtcs: crtcs list */
+       struct komeda_crtc crtcs[KOMEDA_MAX_PIPELINES];
+};
+
+#define to_kplane(p)   container_of(p, struct komeda_plane, base)
+#define to_kplane_st(p)        container_of(p, struct komeda_plane_state, base)
+#define to_kconn(p)    container_of(p, struct komeda_wb_connector, base)
+#define to_kcrtc(p)    container_of(p, struct komeda_crtc, base)
+#define to_kcrtc_st(p) container_of(p, struct komeda_crtc_state, base)
+#define to_kdev(p)     container_of(p, struct komeda_kms_dev, base)
+
+int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev);
+
+int komeda_kms_add_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev);
+int komeda_kms_add_planes(struct komeda_kms_dev *kms, struct komeda_dev *mdev);
+int komeda_kms_add_private_objs(struct komeda_kms_dev *kms,
+                               struct komeda_dev *mdev);
+void komeda_kms_cleanup_private_objs(struct komeda_dev *mdev);
+
+struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev);
+void komeda_kms_detach(struct komeda_kms_dev *kms);
+
+#endif /*_KOMEDA_KMS_H_*/
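
The to_k*() macros above are the usual container_of downcasts from the embedded DRM object back to the komeda wrapper; a sketch of their use in a drm callback (the function name is made up):

static void komeda_crtc_log_example(struct drm_crtc *crtc)
{
	struct komeda_crtc *kcrtc = to_kcrtc(crtc);

	DRM_INFO("crtc driven by master pipe-%d\n", kcrtc->master->id);
}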
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
new file mode 100644 (file)
index 0000000..edb1cd7
--- /dev/null
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+#include "komeda_dev.h"
+#include "komeda_pipeline.h"
+
+/** komeda_pipeline_add - Add a pipeline to &komeda_dev */
+struct komeda_pipeline *
+komeda_pipeline_add(struct komeda_dev *mdev, size_t size,
+                   struct komeda_pipeline_funcs *funcs)
+{
+       struct komeda_pipeline *pipe;
+
+       if (mdev->n_pipelines + 1 > KOMEDA_MAX_PIPELINES) {
+               DRM_ERROR("Exceed max support %d pipelines.\n",
+                         KOMEDA_MAX_PIPELINES);
+               return NULL;
+       }
+
+       if (size < sizeof(*pipe)) {
+               DRM_ERROR("Request pipeline size too small.\n");
+               return NULL;
+       }
+
+       pipe = devm_kzalloc(mdev->dev, size, GFP_KERNEL);
+       if (!pipe)
+               return NULL;
+
+       pipe->mdev = mdev;
+       pipe->id   = mdev->n_pipelines;
+       pipe->funcs = funcs;
+
+       mdev->pipelines[mdev->n_pipelines] = pipe;
+       mdev->n_pipelines++;
+
+       return pipe;
+}
+
+void komeda_pipeline_destroy(struct komeda_dev *mdev,
+                            struct komeda_pipeline *pipe)
+{
+       struct komeda_component *c;
+       int i;
+
+       dp_for_each_set_bit(i, pipe->avail_comps) {
+               c = komeda_pipeline_get_component(pipe, i);
+               komeda_component_destroy(mdev, c);
+       }
+
+       clk_put(pipe->pxlclk);
+       clk_put(pipe->aclk);
+
+       of_node_put(pipe->of_output_dev);
+       of_node_put(pipe->of_output_port);
+       of_node_put(pipe->of_node);
+
+       devm_kfree(mdev->dev, pipe);
+}
+
+struct komeda_component **
+komeda_pipeline_get_component_pos(struct komeda_pipeline *pipe, int id)
+{
+       struct komeda_dev *mdev = pipe->mdev;
+       struct komeda_pipeline *temp = NULL;
+       struct komeda_component **pos = NULL;
+
+       switch (id) {
+       case KOMEDA_COMPONENT_LAYER0:
+       case KOMEDA_COMPONENT_LAYER1:
+       case KOMEDA_COMPONENT_LAYER2:
+       case KOMEDA_COMPONENT_LAYER3:
+               pos = to_cpos(pipe->layers[id - KOMEDA_COMPONENT_LAYER0]);
+               break;
+       case KOMEDA_COMPONENT_WB_LAYER:
+               pos = to_cpos(pipe->wb_layer);
+               break;
+       case KOMEDA_COMPONENT_COMPIZ0:
+       case KOMEDA_COMPONENT_COMPIZ1:
+               temp = mdev->pipelines[id - KOMEDA_COMPONENT_COMPIZ0];
+               if (!temp) {
+                       DRM_ERROR("compiz-%d doesn't exist.\n", id);
+                       return NULL;
+               }
+               pos = to_cpos(temp->compiz);
+               break;
+       case KOMEDA_COMPONENT_SCALER0:
+       case KOMEDA_COMPONENT_SCALER1:
+               pos = to_cpos(pipe->scalers[id - KOMEDA_COMPONENT_SCALER0]);
+               break;
+       case KOMEDA_COMPONENT_IPS0:
+       case KOMEDA_COMPONENT_IPS1:
+               temp = mdev->pipelines[id - KOMEDA_COMPONENT_IPS0];
+               if (!temp) {
+                       DRM_ERROR("ips-%d doesn't exist.\n", id);
+                       return NULL;
+               }
+               pos = to_cpos(temp->improc);
+               break;
+       case KOMEDA_COMPONENT_TIMING_CTRLR:
+               pos = to_cpos(pipe->ctrlr);
+               break;
+       default:
+               pos = NULL;
+               DRM_ERROR("Unknown pipeline resource ID: %d.\n", id);
+               break;
+       }
+
+       return pos;
+}
+
+struct komeda_component *
+komeda_pipeline_get_component(struct komeda_pipeline *pipe, int id)
+{
+       struct komeda_component **pos = NULL;
+       struct komeda_component *c = NULL;
+
+       pos = komeda_pipeline_get_component_pos(pipe, id);
+       if (pos)
+               c = *pos;
+
+       return c;
+}
+
+/** komeda_component_add - Add a component to &komeda_pipeline */
+struct komeda_component *
+komeda_component_add(struct komeda_pipeline *pipe,
+                    size_t comp_sz, u32 id, u32 hw_id,
+                    struct komeda_component_funcs *funcs,
+                    u8 max_active_inputs, u32 supported_inputs,
+                    u8 max_active_outputs, u32 __iomem *reg,
+                    const char *name_fmt, ...)
+{
+       struct komeda_component **pos;
+       struct komeda_component *c;
+       int idx, *num = NULL;
+
+       if (max_active_inputs > KOMEDA_COMPONENT_N_INPUTS) {
+               WARN(1, "please large KOMEDA_COMPONENT_N_INPUTS to %d.\n",
+                    max_active_inputs);
+               return NULL;
+       }
+
+       pos = komeda_pipeline_get_component_pos(pipe, id);
+       if (!pos || (*pos))
+               return NULL;
+
+       if (has_bit(id, KOMEDA_PIPELINE_LAYERS)) {
+               idx = id - KOMEDA_COMPONENT_LAYER0;
+               num = &pipe->n_layers;
+               if (idx != pipe->n_layers) {
+                       DRM_ERROR("please add Layer by id sequence.\n");
+                       return NULL;
+               }
+       } else if (has_bit(id,  KOMEDA_PIPELINE_SCALERS)) {
+               idx = id - KOMEDA_COMPONENT_SCALER0;
+               num = &pipe->n_scalers;
+               if (idx != pipe->n_scalers) {
+                       DRM_ERROR("please add Scaler by id sequence.\n");
+                       return NULL;
+               }
+       }
+
+       c = devm_kzalloc(pipe->mdev->dev, comp_sz, GFP_KERNEL);
+       if (!c)
+               return NULL;
+
+       c->id = id;
+       c->hw_id = hw_id;
+       c->reg = reg;
+       c->pipeline = pipe;
+       c->max_active_inputs = max_active_inputs;
+       c->max_active_outputs = max_active_outputs;
+       c->supported_inputs = supported_inputs;
+       c->funcs = funcs;
+
+       if (name_fmt) {
+               va_list args;
+
+               va_start(args, name_fmt);
+               vsnprintf(c->name, sizeof(c->name), name_fmt, args);
+               va_end(args);
+       }
+
+       if (num)
+               *num = *num + 1;
+
+       pipe->avail_comps |= BIT(c->id);
+       *pos = c;
+
+       return c;
+}
+
+void komeda_component_destroy(struct komeda_dev *mdev,
+                             struct komeda_component *c)
+{
+       devm_kfree(mdev->dev, c);
+}
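
A sketch of how a chip backend would register a layer through komeda_component_add(); the values and register offset are made up, and the real D71 resource enumeration lands in later patches. Note that, as enforced above, layers and scalers must be added in id order:

static void add_layer_example(struct komeda_pipeline *pipe, u32 __iomem *reg)
{
	struct komeda_component *c;

	c = komeda_component_add(pipe, sizeof(struct komeda_layer),
				 KOMEDA_COMPONENT_LAYER0, 0 /* hw_id */,
				 NULL /* funcs: chip-specific */,
				 1,	/* max_active_inputs */
				 0,	/* supported_inputs */
				 1,	/* max_active_outputs */
				 reg, "Layer-%d", 0);
	if (!c)
		DRM_ERROR("failed to add layer 0\n");
}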
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
new file mode 100644 (file)
index 0000000..8c950bc
--- /dev/null
@@ -0,0 +1,359 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+#ifndef _KOMEDA_PIPELINE_H_
+#define _KOMEDA_PIPELINE_H_
+
+#include <linux/types.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include "malidp_utils.h"
+
+#define KOMEDA_MAX_PIPELINES           2
+#define KOMEDA_PIPELINE_MAX_LAYERS     4
+#define KOMEDA_PIPELINE_MAX_SCALERS    2
+#define KOMEDA_COMPONENT_N_INPUTS      5
+
+/* pipeline component IDs */
+enum {
+       KOMEDA_COMPONENT_LAYER0         = 0,
+       KOMEDA_COMPONENT_LAYER1         = 1,
+       KOMEDA_COMPONENT_LAYER2         = 2,
+       KOMEDA_COMPONENT_LAYER3         = 3,
+       KOMEDA_COMPONENT_WB_LAYER       = 7, /* write back layer */
+       KOMEDA_COMPONENT_SCALER0        = 8,
+       KOMEDA_COMPONENT_SCALER1        = 9,
+       KOMEDA_COMPONENT_SPLITTER       = 12,
+       KOMEDA_COMPONENT_MERGER         = 14,
+       KOMEDA_COMPONENT_COMPIZ0        = 16, /* compositor */
+       KOMEDA_COMPONENT_COMPIZ1        = 17,
+       KOMEDA_COMPONENT_IPS0           = 20, /* post image processor */
+       KOMEDA_COMPONENT_IPS1           = 21,
+       KOMEDA_COMPONENT_TIMING_CTRLR   = 22, /* timing controller */
+};
+
+#define KOMEDA_PIPELINE_LAYERS         (BIT(KOMEDA_COMPONENT_LAYER0) |\
+                                        BIT(KOMEDA_COMPONENT_LAYER1) |\
+                                        BIT(KOMEDA_COMPONENT_LAYER2) |\
+                                        BIT(KOMEDA_COMPONENT_LAYER3))
+
+#define KOMEDA_PIPELINE_SCALERS                (BIT(KOMEDA_COMPONENT_SCALER0) |\
+                                        BIT(KOMEDA_COMPONENT_SCALER1))
+
+#define KOMEDA_PIPELINE_COMPIZS                (BIT(KOMEDA_COMPONENT_COMPIZ0) |\
+                                        BIT(KOMEDA_COMPONENT_COMPIZ1))
+
+#define KOMEDA_PIPELINE_IMPROCS                (BIT(KOMEDA_COMPONENT_IPS0) |\
+                                        BIT(KOMEDA_COMPONENT_IPS1))
+struct komeda_component;
+struct komeda_component_state;
+
+/** komeda_component_funcs - component control functions */
+struct komeda_component_funcs {
+       /** @validate: optional.
+        * A component may have special requirements or limitations; this
+        * function gives HW the ability to do further HW-specific checks.
+        */
+       int (*validate)(struct komeda_component *c,
+                       struct komeda_component_state *state);
+       /** @update: update is an active update */
+       void (*update)(struct komeda_component *c,
+                      struct komeda_component_state *state);
+       /** @disable: disable component */
+       void (*disable)(struct komeda_component *c);
+       /** @dump_register: Optional, dump registers to seq_file */
+       void (*dump_register)(struct komeda_component *c, struct seq_file *seq);
+};
+
+/**
+ * struct komeda_component
+ *
+ * struct komeda_component describes the data flow capabilities for how to
+ * link a component into the display pipeline.
+ * All specific components are subclasses of this structure.
+ */
+struct komeda_component {
+       /** @obj: treat component as private obj */
+       struct drm_private_obj obj;
+       /** @pipeline: the komeda pipeline this component belongs to */
+       struct komeda_pipeline *pipeline;
+       /** @name: component name */
+       char name[32];
+       /**
+        * @reg:
+        * component register base,
+        * which is initialized by chip and used by chip only
+        */
+       u32 __iomem *reg;
+       /** @id: component id */
+       u32 id;
+       /** @hw_id: component hw id,
+        *  which is initialized by chip and used by chip only
+        */
+       u32 hw_id;
+
+       /**
+        * @max_active_inputs:
+        * @max_active_outputs:
+        *
+        * maximum number of inputs/outputs that can be active at the same
+        * time.
+        * Note:
+        * this number isn't the number of bits set in @supported_inputs or
+        * @supported_outputs; it may be less, since a component may not
+        * support enabling all @supported_inputs/outputs at the same time.
+        */
+       u8 max_active_inputs;
+       u8 max_active_outputs;
+       /**
+        * @supported_inputs:
+        * @supported_outputs:
+        *
+        * bitmask of BIT(component->id) for the supported inputs/outputs
+        * describes the possibilities of how a component is linked into a
+        * pipeline.
+        */
+       u32 supported_inputs;
+       u32 supported_outputs;
+
+       /**
+        * @funcs: chip functions to access HW
+        */
+       struct komeda_component_funcs *funcs;
+};
+
+/**
+ * struct komeda_component_output
+ *
+ * A component can have multiple outputs; to know where the data comes
+ * from, knowing the component alone is not enough, we also need to know
+ * its output port.
+ */
+struct komeda_component_output {
+       /** @component: indicate which component the data comes from */
+       struct komeda_component *component;
+       /** @output_port:
+        * the output port of the &komeda_component_output.component
+        */
+       u8 output_port;
+};
+
+/**
+ * struct komeda_component_state
+ *
+ * component_state is the data flow configuration of the component, and it's
+ * the superclass of all specific component_state like @komeda_layer_state,
+ * @komeda_scaler_state
+ */
+struct komeda_component_state {
+       /** @obj: tracking component_state by drm_atomic_state */
+       struct drm_private_state obj;
+       struct komeda_component *component;
+       /**
+        * @binding_user:
+        * the currently bound user; the user can be a crtc/plane/wb_conn,
+        * and which one is valid is decided by @component and @inputs:
+        *
+        * -  Layer: its user always is plane.
+        * -  compiz/improc/timing_ctrlr: the user is crtc.
+        * -  wb_layer: wb_conn;
+        * -  scaler: plane when input is layer, wb_conn if input is compiz.
+        */
+       union {
+               struct drm_crtc *crtc;
+               struct drm_plane *plane;
+               struct drm_connector *wb_conn;
+               void *binding_user;
+       };
+       /**
+        * @active_inputs:
+        *
+        * active_inputs is bitmask of @inputs index
+        *
+        * -  active_inputs = changed_active_inputs + unchanged_active_inputs
+        * -  affected_inputs = old->active_inputs + new->active_inputs;
+        * -  disabling_inputs = affected_inputs ^ active_inputs;
+        * -  changed_inputs = disabling_inputs + changed_active_inputs;
+        *
+        * NOTE:
+        * changed_inputs doesn't include all active_inputs, but only
+        * @changed_active_inputs, and this bitmask can be used at the chip
+        * level for dirty updates.
+        */
+       u16 active_inputs;
+       u16 changed_active_inputs;
+       u16 affected_inputs;
+       /**
+        * @inputs:
+        *
+        * inputs[i] is only valid when BIT(i) has been set in
+        * @active_inputs; otherwise inputs[i] is undefined.
+        */
+       struct komeda_component_output inputs[KOMEDA_COMPONENT_N_INPUTS];
+};
+
+static inline u16 component_disabling_inputs(struct komeda_component_state *st)
+{
+       return st->affected_inputs ^ st->active_inputs;
+}
+
+static inline u16 component_changed_inputs(struct komeda_component_state *st)
+{
+       return component_disabling_inputs(st) | st->changed_active_inputs;
+}
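+
+/*
+ * Worked example for the two helpers above (hypothetical values): with
+ * old->active_inputs = 0b0110 and a new state that keeps input 1
+ * unchanged, disables input 2 and enables input 3:
+ *
+ *   new->active_inputs         = 0b1010
+ *   new->changed_active_inputs = 0b1000   (only input 3 is new)
+ *   new->affected_inputs       = 0b0110 | 0b1010 = 0b1110
+ *   component_disabling_inputs = 0b1110 ^ 0b1010 = 0b0100   (input 2)
+ *   component_changed_inputs   = 0b0100 | 0b1000 = 0b1100
+ *
+ * so a chip-level dirty update only touches inputs 2 and 3.
+ */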
+
+#define to_comp(__c)   (((__c) == NULL) ? NULL : &((__c)->base))
+#define to_cpos(__c)   ((struct komeda_component **)&(__c))
+
+/* these structures are going to be filled in in future patches */
+struct komeda_layer {
+       struct komeda_component base;
+       /* layer specific features and caps */
+       int layer_type; /* RICH, SIMPLE or WB */
+};
+
+struct komeda_layer_state {
+       struct komeda_component_state base;
+       /* layer specific configuration state */
+};
+
+struct komeda_compiz {
+       struct komeda_component base;
+       /* compiz specific features and caps */
+};
+
+struct komeda_compiz_state {
+       struct komeda_component_state base;
+       /* compiz specific configuration state */
+};
+
+struct komeda_scaler {
+       struct komeda_component base;
+       /* scaler features and caps */
+};
+
+struct komeda_scaler_state {
+       struct komeda_component_state base;
+};
+
+struct komeda_improc {
+       struct komeda_component base;
+};
+
+struct komeda_improc_state {
+       struct komeda_component_state base;
+};
+
+/* display timing controller */
+struct komeda_timing_ctrlr {
+       struct komeda_component base;
+};
+
+struct komeda_timing_ctrlr_state {
+       struct komeda_component_state base;
+};
+
+/** struct komeda_pipeline_funcs */
+struct komeda_pipeline_funcs {
+       /* dump_register: Optional, dump registers to seq_file */
+       void (*dump_register)(struct komeda_pipeline *pipe,
+                             struct seq_file *sf);
+};
+
+/**
+ * struct komeda_pipeline
+ *
+ * Represents a complete display pipeline and holds all of its functional
+ * components.
+ */
+struct komeda_pipeline {
+       /** @obj: link pipeline as private obj of drm_atomic_state */
+       struct drm_private_obj obj;
+       /** @mdev: the parent komeda_dev */
+       struct komeda_dev *mdev;
+       /** @pxlclk: pixel clock */
+       struct clk *pxlclk;
+       /** @aclk: AXI clock */
+       struct clk *aclk;
+       /** @id: pipeline id */
+       int id;
+       /** @avail_comps: available components mask of pipeline */
+       u32 avail_comps;
+       int n_layers;
+       struct komeda_layer *layers[KOMEDA_PIPELINE_MAX_LAYERS];
+       int n_scalers;
+       struct komeda_scaler *scalers[KOMEDA_PIPELINE_MAX_SCALERS];
+       struct komeda_compiz *compiz;
+       struct komeda_layer  *wb_layer;
+       struct komeda_improc *improc;
+       struct komeda_timing_ctrlr *ctrlr;
+       struct komeda_pipeline_funcs *funcs; /* private pipeline functions */
+
+       /** @of_node: pipeline dt node */
+       struct device_node *of_node;
+       /** @of_output_port: pipeline output port */
+       struct device_node *of_output_port;
+       /** @of_output_dev: output connector device node */
+       struct device_node *of_output_dev;
+};
+
+/**
+ * struct komeda_pipeline_state
+ *
+ * NOTE:
+ * Unlike the pipeline, pipeline_state doesn't gather any component_state
+ * into it. That is because all component states are managed by
+ * drm_atomic_state.
+ */
+struct komeda_pipeline_state {
+       /** @obj: tracking pipeline_state by drm_atomic_state */
+       struct drm_private_state obj;
+       struct komeda_pipeline *pipe;
+       /** @crtc: currently bound crtc */
+       struct drm_crtc *crtc;
+       /**
+        * @active_comps:
+        *
+        * bitmask - BIT(component->id) of active components
+        */
+       u32 active_comps;
+};
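+
+/*
+ * Sketch (assumption, not part of this patch): since pipeline_state is a
+ * drm_private_state, an atomic check/commit path would fetch it through
+ * the core helper and downcast it:
+ *
+ *   struct drm_private_state *priv =
+ *           drm_atomic_get_private_obj_state(state, &pipe->obj);
+ *   struct komeda_pipeline_state *st = priv_to_pipe_st(priv);
+ */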
+
+#define to_layer(c)    container_of(c, struct komeda_layer, base)
+#define to_compiz(c)   container_of(c, struct komeda_compiz, base)
+#define to_scaler(c)   container_of(c, struct komeda_scaler, base)
+#define to_improc(c)   container_of(c, struct komeda_improc, base)
+#define to_ctrlr(c)    container_of(c, struct komeda_timing_ctrlr, base)
+
+#define to_layer_st(c) container_of(c, struct komeda_layer_state, base)
+#define to_compiz_st(c)        container_of(c, struct komeda_compiz_state, base)
+#define to_scaler_st(c) container_of(c, struct komeda_scaler_state, base)
+#define to_improc_st(c)        container_of(c, struct komeda_improc_state, base)
+#define to_ctrlr_st(c) container_of(c, struct komeda_timing_ctrlr_state, base)
+
+#define priv_to_comp_st(o) container_of(o, struct komeda_component_state, obj)
+#define priv_to_pipe_st(o)  container_of(o, struct komeda_pipeline_state, obj)
+
+/* pipeline APIs */
+struct komeda_pipeline *
+komeda_pipeline_add(struct komeda_dev *mdev, size_t size,
+                   struct komeda_pipeline_funcs *funcs);
+void komeda_pipeline_destroy(struct komeda_dev *mdev,
+                            struct komeda_pipeline *pipe);
+
+struct komeda_component *
+komeda_pipeline_get_component(struct komeda_pipeline *pipe, int id);
+
+/* component APIs */
+struct komeda_component *
+komeda_component_add(struct komeda_pipeline *pipe,
+                    size_t comp_sz, u32 id, u32 hw_id,
+                    struct komeda_component_funcs *funcs,
+                    u8 max_active_inputs, u32 supported_inputs,
+                    u8 max_active_outputs, u32 __iomem *reg,
+                    const char *name_fmt, ...);
+
+void komeda_component_destroy(struct komeda_dev *mdev,
+                             struct komeda_component *c);
+
+#endif /* _KOMEDA_PIPELINE_H_*/
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
new file mode 100644 (file)
index 0000000..0a4953a
--- /dev/null
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include "komeda_dev.h"
+#include "komeda_kms.h"
+
+static const struct drm_plane_helper_funcs komeda_plane_helper_funcs = {
+};
+
+static void komeda_plane_destroy(struct drm_plane *plane)
+{
+       drm_plane_cleanup(plane);
+
+       kfree(to_kplane(plane));
+}
+
+static const struct drm_plane_funcs komeda_plane_funcs = {
+};
+
+/* for komeda, a pipeline can be shared between crtcs */
+static u32 get_possible_crtcs(struct komeda_kms_dev *kms,
+                             struct komeda_pipeline *pipe)
+{
+       struct komeda_crtc *crtc;
+       u32 possible_crtcs = 0;
+       int i;
+
+       for (i = 0; i < kms->n_crtcs; i++) {
+               crtc = &kms->crtcs[i];
+
+               if ((pipe == crtc->master) || (pipe == crtc->slave))
+                       possible_crtcs |= BIT(i);
+       }
+
+       return possible_crtcs;
+}
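+
+/*
+ * E.g. (hypothetical): with two crtcs where a pipe is crtc0's master and
+ * crtc1's slave, get_possible_crtcs() returns BIT(0) | BIT(1).
+ */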
+
+/* use Layer0 as primary */
+static u32 get_plane_type(struct komeda_kms_dev *kms,
+                         struct komeda_component *c)
+{
+       bool is_primary = (c->id == KOMEDA_COMPONENT_LAYER0);
+
+       return is_primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
+}
+
+static int komeda_plane_add(struct komeda_kms_dev *kms,
+                           struct komeda_layer *layer)
+{
+       struct komeda_dev *mdev = kms->base.dev_private;
+       struct komeda_component *c = &layer->base;
+       struct komeda_plane *kplane;
+       struct drm_plane *plane;
+       u32 *formats, n_formats = 0;
+       int err;
+
+       kplane = kzalloc(sizeof(*kplane), GFP_KERNEL);
+       if (!kplane)
+               return -ENOMEM;
+
+       plane = &kplane->base;
+       kplane->layer = layer;
+
+       formats = komeda_get_layer_fourcc_list(&mdev->fmt_tbl,
+                                              layer->layer_type, &n_formats);
+
+       err = drm_universal_plane_init(&kms->base, plane,
+                       get_possible_crtcs(kms, c->pipeline),
+                       &komeda_plane_funcs,
+                       formats, n_formats, NULL,
+                       get_plane_type(kms, c),
+                       "%s", c->name);
+
+       komeda_put_fourcc_list(formats);
+
+       if (err)
+               goto cleanup;
+
+       drm_plane_helper_add(plane, &komeda_plane_helper_funcs);
+
+       return 0;
+cleanup:
+       komeda_plane_destroy(plane);
+       return err;
+}
+
+int komeda_kms_add_planes(struct komeda_kms_dev *kms, struct komeda_dev *mdev)
+{
+       struct komeda_pipeline *pipe;
+       int i, j, err;
+
+       for (i = 0; i < mdev->n_pipelines; i++) {
+               pipe = mdev->pipelines[i];
+
+               for (j = 0; j < pipe->n_layers; j++) {
+                       err = komeda_plane_add(kms, pipe->layers[j]);
+                       if (err)
+                               return err;
+               }
+       }
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c b/drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c
new file mode 100644 (file)
index 0000000..f1c9e3f
--- /dev/null
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+#include "komeda_dev.h"
+#include "komeda_kms.h"
+
+static struct drm_private_state *
+komeda_pipeline_atomic_duplicate_state(struct drm_private_obj *obj)
+{
+       struct komeda_pipeline_state *st;
+
+       st = kmemdup(obj->state, sizeof(*st), GFP_KERNEL);
+       if (!st)
+               return NULL;
+
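+       /* the duplicated state starts with no active components; the
+        * atomic check phase is expected to rebuild this mask (assumption)
+        */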
+       st->active_comps = 0;
+
+       __drm_atomic_helper_private_obj_duplicate_state(obj, &st->obj);
+
+       return &st->obj;
+}
+
+static void
+komeda_pipeline_atomic_destroy_state(struct drm_private_obj *obj,
+                                    struct drm_private_state *state)
+{
+       kfree(priv_to_pipe_st(state));
+}
+
+static const struct drm_private_state_funcs komeda_pipeline_obj_funcs = {
+       .atomic_duplicate_state = komeda_pipeline_atomic_duplicate_state,
+       .atomic_destroy_state   = komeda_pipeline_atomic_destroy_state,
+};
+
+static int komeda_pipeline_obj_add(struct komeda_kms_dev *kms,
+                                  struct komeda_pipeline *pipe)
+{
+       struct komeda_pipeline_state *st;
+
+       st = kzalloc(sizeof(*st), GFP_KERNEL);
+       if (!st)
+               return -ENOMEM;
+
+       st->pipe = pipe;
+       drm_atomic_private_obj_init(&kms->base, &pipe->obj, &st->obj,
+                                   &komeda_pipeline_obj_funcs);
+
+       return 0;
+}
+
+int komeda_kms_add_private_objs(struct komeda_kms_dev *kms,
+                               struct komeda_dev *mdev)
+{
+       struct komeda_pipeline *pipe;
+       int i, err;
+
+       for (i = 0; i < mdev->n_pipelines; i++) {
+               pipe = mdev->pipelines[i];
+
+               err = komeda_pipeline_obj_add(kms, pipe);
+               if (err)
+                       return err;
+
+               /* Add component */
+       }
+
+       return 0;
+}
+
+void komeda_kms_cleanup_private_objs(struct komeda_dev *mdev)
+{
+       struct komeda_pipeline *pipe;
+       struct komeda_component *c;
+       int i, id;
+
+       for (i = 0; i < mdev->n_pipelines; i++) {
+               pipe = mdev->pipelines[i];
+               dp_for_each_set_bit(id, pipe->avail_comps) {
+                       c = komeda_pipeline_get_component(pipe, id);
+
+                       drm_atomic_private_obj_fini(&c->obj);
+               }
+               drm_atomic_private_obj_fini(&pipe->obj);
+       }
+}
index e4d67b70244d5716764a6afa1ef2ae991e2e51c6..0b2b62f8fa3c43a3508b23d6e7c2ac07262d5ff3 100644 (file)
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
 #include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_of.h>
 #include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <linux/clk.h>
 #include <linux/of_graph.h>
 #include <linux/platform_data/simplefb.h>
index dfad8d06d1082569491ba859f57f244fbc8d1d9d..8fc0b884c42885bb7141709828a34424264df417 100644 (file)
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
 #include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_modeset_helper.h>
 #include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
 
 #include "hdlcd_drv.h"
 #include "hdlcd_regs.h"
@@ -229,7 +229,7 @@ static int hdlcd_debugfs_init(struct drm_minor *minor)
 DEFINE_DRM_GEM_CMA_FOPS(fops);
 
 static struct drm_driver hdlcd_driver = {
-       .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM |
+       .driver_features = DRIVER_GEM |
                           DRIVER_MODESET | DRIVER_PRIME |
                           DRIVER_ATOMIC,
        .irq_handler = hdlcd_irq,
index e1b72782848c3cba5dc2c982eae596d6de71617b..56aad288666e4c57f25fa795f1f63ef96c2cc79d 100644 (file)
@@ -14,7 +14,7 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <linux/clk.h>
 #include <linux/pm_runtime.h>
 #include <video/videomode.h>
index 505f316a192ec915581fb014eec4b380019f24c0..ab50ad06e2717cecfe10afe51153c85953e108b0 100644 (file)
@@ -23,7 +23,7 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
index 91472e5e0c8b8a3b0c19a2fb5f860fac83ffcb5b..041a64dc7167c7839d5362c20547e2197dd40d2f 100644 (file)
@@ -8,7 +8,7 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drmP.h>
index 2f7c048c53613b9086beaa1a5ecaa26c508df884..0e91d27921bd060a0d72819aefa2155098dd53cc 100644 (file)
@@ -9,7 +9,7 @@
  */
 #include <linux/clk.h>
 #include <linux/io.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include "armada_crtc.h"
 #include "armada_drm.h"
 #include "armada_hw.h"
index da9360688b5546664deb1f41de3e75e12137fa85..ba4a3fab7745449aaaa012ad32554153cc0fe4fe 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/platform_device.h>
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_atomic_helper.h>
 #include "armada_crtc.h"
@@ -270,13 +270,7 @@ static void armada_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
        tm = adj->crtc_vtotal - adj->crtc_vsync_end;
 
        DRM_DEBUG_KMS("[CRTC:%d:%s] mode " DRM_MODE_FMT "\n",
-                     crtc->base.id, crtc->name,
-                     adj->base.id, adj->name, adj->vrefresh, adj->clock,
-                     adj->crtc_hdisplay, adj->crtc_hsync_start,
-                     adj->crtc_hsync_end, adj->crtc_htotal,
-                     adj->crtc_vdisplay, adj->crtc_vsync_start,
-                     adj->crtc_vsync_end, adj->crtc_vtotal,
-                     adj->type, adj->flags);
+                     crtc->base.id, crtc->name, DRM_MODE_ARG(adj));
        DRM_DEBUG_KMS("lm %d rm %d tm %d bm %d\n", lm, rm, tm, bm);
 
        /* Now compute the divider for real */
index 7ebd337b60af3d40b9b0fb920cc23126e2a7e330..08761ff01739368d31734f4188d2c01212cd6dde 100644 (file)
@@ -8,6 +8,8 @@
 #ifndef ARMADA_CRTC_H
 #define ARMADA_CRTC_H
 
+#include <drm/drm_crtc.h>
+
 struct armada_gem_object;
 
 struct armada_regs {
index fa31589b4fc0914229eae00cc2d868b43b429eba..e660c5ca52ae73553eef75c4510ac3e896e555dd 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/module.h>
 #include <linux/of_graph.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_of.h>
 #include "armada_crtc.h"
index 6bd638a54579f683d27541e77f80a9efc046b8cb..058ac7d9920f7a07cffe39e90c3dd0bc116963ed 100644 (file)
@@ -5,7 +5,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_modeset_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include "armada_drm.h"
index bf589c53b908d66789679df6f4098c883150fa87..3871b39d4dea1feddb6791b329d9b4fe76e00774 100644 (file)
@@ -30,6 +30,7 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "ast_drv.h"
 
index c2e41369adcf4b8d43cdd4b0ec829318e9b13654..2c9f8dd9733a404354085db8d5347df2eefc300d 100644 (file)
@@ -39,7 +39,9 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_util.h>
 #include <drm/drm_crtc_helper.h>
+
 #include "ast_drv.h"
 
 static void ast_dirty_update(struct ast_fbdev *afbdev,
@@ -261,7 +263,7 @@ static void ast_fbdev_destroy(struct drm_device *dev,
 {
        struct ast_framebuffer *afb = &afbdev->afb;
 
-       drm_crtc_force_disable_all(dev);
+       drm_helper_force_disable_all(dev);
        drm_fb_helper_unregister_fbi(&afbdev->helper);
 
        if (afb->obj) {
index 8bb355d5d43d80169fbfeb73954008b772616e15..97fed0627d1c8fa2dcb2d9a8840a15bb9f3a61d8 100644 (file)
@@ -32,6 +32,7 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
 #include "ast_drv.h"
 
 #include "ast_tables.h"
index 96f4082671fe79c156f9909d30b38f0dc5b675ff..8070a558d7b1bf518774544556d4b937209b2ca8 100644 (file)
@@ -24,7 +24,7 @@
 #include <linux/pinctrl/consumer.h>
 
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drmP.h>
 
 #include <video/videomode.h>
index 034a91112098d87848fd26c26749680d49c50b07..0be13eceedba927283a8f8e504022eb75a6fba14 100644 (file)
@@ -720,7 +720,7 @@ static void atmel_hlcdc_dc_irq_uninstall(struct drm_device *dev)
 DEFINE_DRM_GEM_CMA_FOPS(fops);
 
 static struct drm_driver atmel_hlcdc_dc_driver = {
-       .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM |
+       .driver_features = DRIVER_GEM |
                           DRIVER_MODESET | DRIVER_PRIME |
                           DRIVER_ATOMIC,
        .irq_handler = atmel_hlcdc_dc_irq_handler,
index 4cc1e03f0aeedbda36c922149f71d9e46996c327..70bd540d644e4a3d8a21e7e23dbe39f7ace26c41 100644 (file)
@@ -31,7 +31,7 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
index 9330a076e15a564a08fedba253cd69fbfc33b94c..e836e2de35ce8bd240ce7db48ffa6adf13c96f4e 100644 (file)
@@ -549,7 +549,8 @@ atmel_hlcdc_plane_prepare_disc_area(struct drm_crtc_state *c_state)
 
                ovl_state = drm_plane_state_to_atmel_hlcdc_plane_state(ovl_s);
 
-               if (!ovl_s->fb ||
+               if (!ovl_s->visible ||
+                   !ovl_s->fb ||
                    ovl_s->fb->format->has_alpha ||
                    ovl_s->alpha != DRM_BLEND_ALPHA_OPAQUE)
                        continue;
@@ -601,15 +602,10 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
        struct drm_framebuffer *fb = state->base.fb;
        const struct drm_display_mode *mode;
        struct drm_crtc_state *crtc_state;
-       unsigned int patched_crtc_w;
-       unsigned int patched_crtc_h;
-       unsigned int patched_src_w;
-       unsigned int patched_src_h;
        unsigned int tmp;
-       int x_offset = 0;
-       int y_offset = 0;
        int hsub = 1;
        int vsub = 1;
+       int ret;
        int i;
 
        if (!state->base.crtc || !fb)
@@ -618,14 +614,21 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
        crtc_state = drm_atomic_get_existing_crtc_state(s->state, s->crtc);
        mode = &crtc_state->adjusted_mode;
 
-       state->src_x = s->src_x;
-       state->src_y = s->src_y;
-       state->src_h = s->src_h;
-       state->src_w = s->src_w;
-       state->crtc_x = s->crtc_x;
-       state->crtc_y = s->crtc_y;
-       state->crtc_h = s->crtc_h;
-       state->crtc_w = s->crtc_w;
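+       /* 16.16 fixed point: min_scale (1 << 16) / 2048 allows up to 2048x
+        * upscaling, max_scale INT_MAX allows unlimited downscaling
+        */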
+       ret = drm_atomic_helper_check_plane_state(s, crtc_state,
+                                                 (1 << 16) / 2048,
+                                                 INT_MAX, true, true);
+       if (ret || !s->visible)
+               return ret;
+
+       state->src_x = s->src.x1;
+       state->src_y = s->src.y1;
+       state->src_w = drm_rect_width(&s->src);
+       state->src_h = drm_rect_height(&s->src);
+       state->crtc_x = s->dst.x1;
+       state->crtc_y = s->dst.y1;
+       state->crtc_w = drm_rect_width(&s->dst);
+       state->crtc_h = drm_rect_height(&s->dst);
+
        if ((state->src_x | state->src_y | state->src_w | state->src_h) &
            SUBPIXEL_MASK)
                return -EINVAL;
@@ -639,45 +642,6 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
        if (state->nplanes > ATMEL_HLCDC_LAYER_MAX_PLANES)
                return -EINVAL;
 
-       /*
-        * Swap width and size in case of 90 or 270 degrees rotation
-        */
-       if (drm_rotation_90_or_270(state->base.rotation)) {
-               tmp = state->crtc_w;
-               state->crtc_w = state->crtc_h;
-               state->crtc_h = tmp;
-               tmp = state->src_w;
-               state->src_w = state->src_h;
-               state->src_h = tmp;
-       }
-
-       if (state->crtc_x + state->crtc_w > mode->hdisplay)
-               patched_crtc_w = mode->hdisplay - state->crtc_x;
-       else
-               patched_crtc_w = state->crtc_w;
-
-       if (state->crtc_x < 0) {
-               patched_crtc_w += state->crtc_x;
-               x_offset = -state->crtc_x;
-               state->crtc_x = 0;
-       }
-
-       if (state->crtc_y + state->crtc_h > mode->vdisplay)
-               patched_crtc_h = mode->vdisplay - state->crtc_y;
-       else
-               patched_crtc_h = state->crtc_h;
-
-       if (state->crtc_y < 0) {
-               patched_crtc_h += state->crtc_y;
-               y_offset = -state->crtc_y;
-               state->crtc_y = 0;
-       }
-
-       patched_src_w = DIV_ROUND_CLOSEST(patched_crtc_w * state->src_w,
-                                         state->crtc_w);
-       patched_src_h = DIV_ROUND_CLOSEST(patched_crtc_h * state->src_h,
-                                         state->crtc_h);
-
        hsub = drm_format_horz_chroma_subsampling(fb->format->format);
        vsub = drm_format_vert_chroma_subsampling(fb->format->format);
 
@@ -692,41 +656,38 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
 
                switch (state->base.rotation & DRM_MODE_ROTATE_MASK) {
                case DRM_MODE_ROTATE_90:
-                       offset = ((y_offset + state->src_y + patched_src_w - 1) /
-                                 ydiv) * fb->pitches[i];
-                       offset += ((x_offset + state->src_x) / xdiv) *
-                                 state->bpp[i];
-                       state->xstride[i] = ((patched_src_w - 1) / ydiv) *
-                                         fb->pitches[i];
-                       state->pstride[i] = -fb->pitches[i] - state->bpp[i];
+                       offset = (state->src_y / ydiv) *
+                                fb->pitches[i];
+                       offset += ((state->src_x + state->src_w - 1) /
+                                  xdiv) * state->bpp[i];
+                       state->xstride[i] = -(((state->src_h - 1) / ydiv) *
+                                           fb->pitches[i]) -
+                                         (2 * state->bpp[i]);
+                       state->pstride[i] = fb->pitches[i] - state->bpp[i];
                        break;
                case DRM_MODE_ROTATE_180:
-                       offset = ((y_offset + state->src_y + patched_src_h - 1) /
+                       offset = ((state->src_y + state->src_h - 1) /
                                  ydiv) * fb->pitches[i];
-                       offset += ((x_offset + state->src_x + patched_src_w - 1) /
+                       offset += ((state->src_x + state->src_w - 1) /
                                   xdiv) * state->bpp[i];
-                       state->xstride[i] = ((((patched_src_w - 1) / xdiv) - 1) *
+                       state->xstride[i] = ((((state->src_w - 1) / xdiv) - 1) *
                                           state->bpp[i]) - fb->pitches[i];
                        state->pstride[i] = -2 * state->bpp[i];
                        break;
                case DRM_MODE_ROTATE_270:
-                       offset = ((y_offset + state->src_y) / ydiv) *
-                                fb->pitches[i];
-                       offset += ((x_offset + state->src_x + patched_src_h - 1) /
-                                  xdiv) * state->bpp[i];
-                       state->xstride[i] = -(((patched_src_w - 1) / ydiv) *
-                                           fb->pitches[i]) -
-                                         (2 * state->bpp[i]);
-                       state->pstride[i] = fb->pitches[i] - state->bpp[i];
+                       offset = ((state->src_y + state->src_h - 1) /
+                                 ydiv) * fb->pitches[i];
+                       offset += (state->src_x / xdiv) * state->bpp[i];
+                       state->xstride[i] = ((state->src_h - 1) / ydiv) *
+                                         fb->pitches[i];
+                       state->pstride[i] = -fb->pitches[i] - state->bpp[i];
                        break;
                case DRM_MODE_ROTATE_0:
                default:
-                       offset = ((y_offset + state->src_y) / ydiv) *
-                                fb->pitches[i];
-                       offset += ((x_offset + state->src_x) / xdiv) *
-                                 state->bpp[i];
+                       offset = (state->src_y / ydiv) * fb->pitches[i];
+                       offset += (state->src_x / xdiv) * state->bpp[i];
                        state->xstride[i] = fb->pitches[i] -
-                                         ((patched_src_w / xdiv) *
+                                         ((state->src_w / xdiv) *
                                           state->bpp[i]);
                        state->pstride[i] = 0;
                        break;
@@ -735,35 +696,45 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
                state->offsets[i] = offset + fb->offsets[i];
        }
 
-       state->src_w = patched_src_w;
-       state->src_h = patched_src_h;
-       state->crtc_w = patched_crtc_w;
-       state->crtc_h = patched_crtc_h;
+       /*
+        * Swap width and size in case of 90 or 270 degrees rotation
+        */
+       if (drm_rotation_90_or_270(state->base.rotation)) {
+               tmp = state->src_w;
+               state->src_w = state->src_h;
+               state->src_h = tmp;
+       }
 
        if (!desc->layout.size &&
            (mode->hdisplay != state->crtc_w ||
             mode->vdisplay != state->crtc_h))
                return -EINVAL;
 
-       if (desc->max_height && state->crtc_h > desc->max_height)
-               return -EINVAL;
-
-       if (desc->max_width && state->crtc_w > desc->max_width)
-               return -EINVAL;
-
        if ((state->crtc_h != state->src_h || state->crtc_w != state->src_w) &&
            (!desc->layout.memsize ||
             state->base.fb->format->has_alpha))
                return -EINVAL;
 
-       if (state->crtc_x < 0 || state->crtc_y < 0)
-               return -EINVAL;
+       return 0;
+}
 
-       if (state->crtc_w + state->crtc_x > mode->hdisplay ||
-           state->crtc_h + state->crtc_y > mode->vdisplay)
-               return -EINVAL;
+static void atmel_hlcdc_plane_atomic_disable(struct drm_plane *p,
+                                            struct drm_plane_state *old_state)
+{
+       struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
 
-       return 0;
+       /* Disable interrupts */
+       atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_HLCDC_LAYER_IDR,
+                                   0xffffffff);
+
+       /* Disable the layer */
+       atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_HLCDC_LAYER_CHDR,
+                                   ATMEL_HLCDC_LAYER_RST |
+                                   ATMEL_HLCDC_LAYER_A2Q |
+                                   ATMEL_HLCDC_LAYER_UPDATE);
+
+       /* Clear all pending interrupts */
+       atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_HLCDC_LAYER_ISR);
 }
 
 static void atmel_hlcdc_plane_atomic_update(struct drm_plane *p,
@@ -777,6 +748,11 @@ static void atmel_hlcdc_plane_atomic_update(struct drm_plane *p,
        if (!p->state->crtc || !p->state->fb)
                return;
 
+       if (!state->base.visible) {
+               atmel_hlcdc_plane_atomic_disable(p, old_s);
+               return;
+       }
+
        atmel_hlcdc_plane_update_pos_and_size(plane, state);
        atmel_hlcdc_plane_update_general_settings(plane, state);
        atmel_hlcdc_plane_update_format(plane, state);
@@ -798,25 +774,6 @@ static void atmel_hlcdc_plane_atomic_update(struct drm_plane *p,
                         ATMEL_HLCDC_LAYER_A2Q : ATMEL_HLCDC_LAYER_EN));
 }
 
-static void atmel_hlcdc_plane_atomic_disable(struct drm_plane *p,
-                                            struct drm_plane_state *old_state)
-{
-       struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
-
-       /* Disable interrupts */
-       atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_HLCDC_LAYER_IDR,
-                                   0xffffffff);
-
-       /* Disable the layer */
-       atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_HLCDC_LAYER_CHDR,
-                                   ATMEL_HLCDC_LAYER_RST |
-                                   ATMEL_HLCDC_LAYER_A2Q |
-                                   ATMEL_HLCDC_LAYER_UPDATE);
-
-       /* Clear all pending interrupts */
-       atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_HLCDC_LAYER_ISR);
-}
-
 static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane)
 {
        const struct atmel_hlcdc_layer_desc *desc = plane->layer.desc;
index 98ef60a19e8f06968cf34b05a224dd200ddc3945..e9e0f8f5eb5b6edde7784131bc685ea2fc2bd0dd 100644 (file)
@@ -1,3 +1,3 @@
-bochs-drm-y := bochs_drv.o bochs_mm.o bochs_kms.o bochs_fbdev.o bochs_hw.o
+bochs-drm-y := bochs_drv.o bochs_mm.o bochs_kms.o bochs_hw.o
 
 obj-$(CONFIG_DRM_BOCHS)        += bochs-drm.o
index fb38c8b857b5a26a7e40d2469d00fe3ce02546ed..03711394f1eda3315e1695b7dfa7e05e7b55effc 100644 (file)
@@ -80,12 +80,6 @@ struct bochs_device {
                struct ttm_bo_device bdev;
                bool initialized;
        } ttm;
-
-       /* fbdev */
-       struct {
-               struct drm_framebuffer *fb;
-               struct drm_fb_helper helper;
-       } fb;
 };
 
 struct bochs_bo {
@@ -121,8 +115,9 @@ int bochs_hw_init(struct drm_device *dev);
 void bochs_hw_fini(struct drm_device *dev);
 
 void bochs_hw_setmode(struct bochs_device *bochs,
-                     struct drm_display_mode *mode,
-                     const struct drm_format_info *format);
+                     struct drm_display_mode *mode);
+void bochs_hw_setformat(struct bochs_device *bochs,
+                       const struct drm_format_info *format);
 void bochs_hw_setbase(struct bochs_device *bochs,
                      int x, int y, u64 addr);
 int bochs_hw_load_edid(struct bochs_device *bochs);
@@ -141,15 +136,19 @@ int bochs_dumb_create(struct drm_file *file, struct drm_device *dev,
 int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
                           uint32_t handle, uint64_t *offset);
 
-int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr);
+int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag);
 int bochs_bo_unpin(struct bochs_bo *bo);
 
+int bochs_gem_prime_pin(struct drm_gem_object *obj);
+void bochs_gem_prime_unpin(struct drm_gem_object *obj);
+void *bochs_gem_prime_vmap(struct drm_gem_object *obj);
+void bochs_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+int bochs_gem_prime_mmap(struct drm_gem_object *obj,
+                        struct vm_area_struct *vma);
+
 /* bochs_kms.c */
 int bochs_kms_init(struct bochs_device *bochs);
 void bochs_kms_fini(struct bochs_device *bochs);
 
 /* bochs_fbdev.c */
-int bochs_fbdev_init(struct bochs_device *bochs);
-void bochs_fbdev_fini(struct bochs_device *bochs);
-
 extern const struct drm_mode_config_funcs bochs_mode_funcs;
index f3dd66ae990aebc8a9518127f74d63c1ee98b252..cb55bdc36f3f01c612a3a187748520465b405675 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "bochs.h"
 
@@ -16,10 +17,6 @@ static int bochs_modeset = -1;
 module_param_named(modeset, bochs_modeset, int, 0444);
 MODULE_PARM_DESC(modeset, "enable/disable kernel modesetting");
 
-static bool enable_fbdev = true;
-module_param_named(fbdev, enable_fbdev, bool, 0444);
-MODULE_PARM_DESC(fbdev, "register fbdev device");
-
 /* ---------------------------------------------------------------------- */
 /* drm interface                                                          */
 
@@ -27,7 +24,6 @@ static void bochs_unload(struct drm_device *dev)
 {
        struct bochs_device *bochs = dev->dev_private;
 
-       bochs_fbdev_fini(bochs);
        bochs_kms_fini(bochs);
        bochs_mm_fini(bochs);
        bochs_hw_fini(dev);
@@ -58,9 +54,6 @@ static int bochs_load(struct drm_device *dev)
        if (ret)
                goto err;
 
-       if (enable_fbdev)
-               bochs_fbdev_init(bochs);
-
        return 0;
 
 err:
@@ -81,7 +74,8 @@ static const struct file_operations bochs_fops = {
 };
 
 static struct drm_driver bochs_driver = {
-       .driver_features        = DRIVER_GEM | DRIVER_MODESET,
+       .driver_features        = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC |
+                                 DRIVER_PRIME,
        .fops                   = &bochs_fops,
        .name                   = "bochs-drm",
        .desc                   = "bochs dispi vga interface (qemu stdvga)",
@@ -91,6 +85,14 @@ static struct drm_driver bochs_driver = {
        .gem_free_object_unlocked = bochs_gem_free_object,
        .dumb_create            = bochs_dumb_create,
        .dumb_map_offset        = bochs_dumb_mmap_offset,
+
+       .gem_prime_export = drm_gem_prime_export,
+       .gem_prime_import = drm_gem_prime_import,
+       .gem_prime_pin = bochs_gem_prime_pin,
+       .gem_prime_unpin = bochs_gem_prime_unpin,
+       .gem_prime_vmap = bochs_gem_prime_vmap,
+       .gem_prime_vunmap = bochs_gem_prime_vunmap,
+       .gem_prime_mmap = bochs_gem_prime_mmap,
 };
 
 /* ---------------------------------------------------------------------- */
@@ -101,27 +103,16 @@ static int bochs_pm_suspend(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
-       struct bochs_device *bochs = drm_dev->dev_private;
-
-       drm_kms_helper_poll_disable(drm_dev);
-
-       drm_fb_helper_set_suspend_unlocked(&bochs->fb.helper, 1);
 
-       return 0;
+       return drm_mode_config_helper_suspend(drm_dev);
 }
 
 static int bochs_pm_resume(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
-       struct bochs_device *bochs = drm_dev->dev_private;
-
-       drm_helper_resume_force_mode(drm_dev);
 
-       drm_fb_helper_set_suspend_unlocked(&bochs->fb.helper, 0);
-
-       drm_kms_helper_poll_enable(drm_dev);
-       return 0;
+       return drm_mode_config_helper_resume(drm_dev);
 }
 #endif
 
@@ -165,6 +156,7 @@ static int bochs_pci_probe(struct pci_dev *pdev,
        if (ret)
                goto err_unload;
 
+       drm_fbdev_generic_setup(dev, 32);
        return ret;
 
 err_unload:
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
deleted file mode 100644 (file)
index dd3c7df..0000000
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include "bochs.h"
-#include <drm/drm_gem_framebuffer_helper.h>
-
-/* ---------------------------------------------------------------------- */
-
-static int bochsfb_mmap(struct fb_info *info,
-                       struct vm_area_struct *vma)
-{
-       struct drm_fb_helper *fb_helper = info->par;
-       struct bochs_bo *bo = gem_to_bochs_bo(fb_helper->fb->obj[0]);
-
-       return ttm_fbdev_mmap(vma, &bo->bo);
-}
-
-static struct fb_ops bochsfb_ops = {
-       .owner = THIS_MODULE,
-       DRM_FB_HELPER_DEFAULT_OPS,
-       .fb_fillrect = drm_fb_helper_cfb_fillrect,
-       .fb_copyarea = drm_fb_helper_cfb_copyarea,
-       .fb_imageblit = drm_fb_helper_cfb_imageblit,
-       .fb_mmap = bochsfb_mmap,
-};
-
-static int bochsfb_create_object(struct bochs_device *bochs,
-                                const struct drm_mode_fb_cmd2 *mode_cmd,
-                                struct drm_gem_object **gobj_p)
-{
-       struct drm_device *dev = bochs->dev;
-       struct drm_gem_object *gobj;
-       u32 size;
-       int ret = 0;
-
-       size = mode_cmd->pitches[0] * mode_cmd->height;
-       ret = bochs_gem_create(dev, size, true, &gobj);
-       if (ret)
-               return ret;
-
-       *gobj_p = gobj;
-       return ret;
-}
-
-static int bochsfb_create(struct drm_fb_helper *helper,
-                         struct drm_fb_helper_surface_size *sizes)
-{
-       struct bochs_device *bochs =
-               container_of(helper, struct bochs_device, fb.helper);
-       struct fb_info *info;
-       struct drm_framebuffer *fb;
-       struct drm_mode_fb_cmd2 mode_cmd;
-       struct drm_gem_object *gobj = NULL;
-       struct bochs_bo *bo = NULL;
-       int size, ret;
-
-       if (sizes->surface_bpp != 32)
-               return -EINVAL;
-
-       mode_cmd.width = sizes->surface_width;
-       mode_cmd.height = sizes->surface_height;
-       mode_cmd.pitches[0] = sizes->surface_width * 4;
-       mode_cmd.pixel_format = DRM_FORMAT_HOST_XRGB8888;
-       size = mode_cmd.pitches[0] * mode_cmd.height;
-
-       /* alloc, pin & map bo */
-       ret = bochsfb_create_object(bochs, &mode_cmd, &gobj);
-       if (ret) {
-               DRM_ERROR("failed to create fbcon backing object %d\n", ret);
-               return ret;
-       }
-
-       bo = gem_to_bochs_bo(gobj);
-
-       ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
-       if (ret)
-               return ret;
-
-       ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM, NULL);
-       if (ret) {
-               DRM_ERROR("failed to pin fbcon\n");
-               ttm_bo_unreserve(&bo->bo);
-               return ret;
-       }
-
-       ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages,
-                         &bo->kmap);
-       if (ret) {
-               DRM_ERROR("failed to kmap fbcon\n");
-               ttm_bo_unreserve(&bo->bo);
-               return ret;
-       }
-
-       ttm_bo_unreserve(&bo->bo);
-
-       /* init fb device */
-       info = drm_fb_helper_alloc_fbi(helper);
-       if (IS_ERR(info)) {
-               DRM_ERROR("Failed to allocate fbi: %ld\n", PTR_ERR(info));
-               return PTR_ERR(info);
-       }
-
-       info->par = &bochs->fb.helper;
-
-       fb = drm_gem_fbdev_fb_create(bochs->dev, sizes, 0, gobj, NULL);
-       if (IS_ERR(fb)) {
-               DRM_ERROR("Failed to create framebuffer: %ld\n", PTR_ERR(fb));
-               return PTR_ERR(fb);
-       }
-
-       /* setup helper */
-       bochs->fb.helper.fb = fb;
-
-       strcpy(info->fix.id, "bochsdrmfb");
-
-       info->fbops = &bochsfb_ops;
-
-       drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
-       drm_fb_helper_fill_var(info, &bochs->fb.helper, sizes->fb_width,
-                              sizes->fb_height);
-
-       info->screen_base = bo->kmap.virtual;
-       info->screen_size = size;
-
-       drm_vma_offset_remove(&bo->bo.bdev->vma_manager, &bo->bo.vma_node);
-       info->fix.smem_start = 0;
-       info->fix.smem_len = size;
-       return 0;
-}
-
-static const struct drm_fb_helper_funcs bochs_fb_helper_funcs = {
-       .fb_probe = bochsfb_create,
-};
-
-static struct drm_framebuffer *
-bochs_gem_fb_create(struct drm_device *dev, struct drm_file *file,
-                   const struct drm_mode_fb_cmd2 *mode_cmd)
-{
-       if (mode_cmd->pixel_format != DRM_FORMAT_XRGB8888 &&
-           mode_cmd->pixel_format != DRM_FORMAT_BGRX8888)
-               return ERR_PTR(-EINVAL);
-
-       return drm_gem_fb_create(dev, file, mode_cmd);
-}
-
-const struct drm_mode_config_funcs bochs_mode_funcs = {
-       .fb_create = bochs_gem_fb_create,
-};
-
-int bochs_fbdev_init(struct bochs_device *bochs)
-{
-       return drm_fb_helper_fbdev_setup(bochs->dev, &bochs->fb.helper,
-                                        &bochs_fb_helper_funcs, 32, 1);
-}
-
-void bochs_fbdev_fini(struct bochs_device *bochs)
-{
-       drm_fb_helper_fbdev_teardown(bochs->dev);
-}
index d0b4e1cee83e8ddd7b1596190986eaf2a1148f79..3e04b2f0ec08037db6a2b6ce4f1c0630bf2c4266 100644 (file)
@@ -204,8 +204,7 @@ void bochs_hw_fini(struct drm_device *dev)
 }
 
 void bochs_hw_setmode(struct bochs_device *bochs,
-                     struct drm_display_mode *mode,
-                     const struct drm_format_info *format)
+                     struct drm_display_mode *mode)
 {
        bochs->xres = mode->hdisplay;
        bochs->yres = mode->vdisplay;
@@ -213,12 +212,8 @@ void bochs_hw_setmode(struct bochs_device *bochs,
        bochs->stride = mode->hdisplay * (bochs->bpp / 8);
        bochs->yres_virtual = bochs->fb_size / bochs->stride;
 
-       DRM_DEBUG_DRIVER("%dx%d @ %d bpp, format %c%c%c%c, vy %d\n",
+       DRM_DEBUG_DRIVER("%dx%d @ %d bpp, vy %d\n",
                         bochs->xres, bochs->yres, bochs->bpp,
-                        (format->format >>  0) & 0xff,
-                        (format->format >>  8) & 0xff,
-                        (format->format >> 16) & 0xff,
-                        (format->format >> 24) & 0xff,
                         bochs->yres_virtual);
 
        bochs_vga_writeb(bochs, 0x3c0, 0x20); /* unblank */
@@ -236,6 +231,16 @@ void bochs_hw_setmode(struct bochs_device *bochs,
 
        bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE,
                          VBE_DISPI_ENABLED | VBE_DISPI_LFB_ENABLED);
+}
+
+void bochs_hw_setformat(struct bochs_device *bochs,
+                       const struct drm_format_info *format)
+{
+       DRM_DEBUG_DRIVER("format %c%c%c%c\n",
+                        (format->format >>  0) & 0xff,
+                        (format->format >>  8) & 0xff,
+                        (format->format >> 16) & 0xff,
+                        (format->format >> 24) & 0xff);
 
        switch (format->format) {
        case DRM_FORMAT_XRGB8888:
index f87c284dd93d896c720172e66bb9b8ada9c6e2a8..9cd82e3631fb2b2b3cfb54d68123c3364184554c 100644 (file)
@@ -6,7 +6,11 @@
  */
 
 #include "bochs.h"
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_probe_helper.h>
 
 static int defx = 1024;
 static int defy = 768;
@@ -18,115 +22,51 @@ MODULE_PARM_DESC(defy, "default y resolution");
 
 /* ---------------------------------------------------------------------- */
 
-static void bochs_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-       switch (mode) {
-       case DRM_MODE_DPMS_ON:
-       case DRM_MODE_DPMS_STANDBY:
-       case DRM_MODE_DPMS_SUSPEND:
-       case DRM_MODE_DPMS_OFF:
-       default:
-               return;
-       }
-}
-
-static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
-                                   struct drm_framebuffer *old_fb)
-{
-       struct bochs_device *bochs =
-               container_of(crtc, struct bochs_device, crtc);
-       struct bochs_bo *bo;
-       u64 gpu_addr = 0;
-       int ret;
-
-       if (old_fb) {
-               bo = gem_to_bochs_bo(old_fb->obj[0]);
-               ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
-               if (ret) {
-                       DRM_ERROR("failed to reserve old_fb bo\n");
-               } else {
-                       bochs_bo_unpin(bo);
-                       ttm_bo_unreserve(&bo->bo);
-               }
-       }
-
-       if (WARN_ON(crtc->primary->fb == NULL))
-               return -EINVAL;
-
-       bo = gem_to_bochs_bo(crtc->primary->fb->obj[0]);
-       ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
-       if (ret)
-               return ret;
-
-       ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
-       if (ret) {
-               ttm_bo_unreserve(&bo->bo);
-               return ret;
-       }
-
-       ttm_bo_unreserve(&bo->bo);
-       bochs_hw_setbase(bochs, x, y, gpu_addr);
-       return 0;
-}
-
-static int bochs_crtc_mode_set(struct drm_crtc *crtc,
-                              struct drm_display_mode *mode,
-                              struct drm_display_mode *adjusted_mode,
-                              int x, int y, struct drm_framebuffer *old_fb)
+static void bochs_crtc_mode_set_nofb(struct drm_crtc *crtc)
 {
        struct bochs_device *bochs =
                container_of(crtc, struct bochs_device, crtc);
 
-       if (WARN_ON(crtc->primary->fb == NULL))
-               return -EINVAL;
-
-       bochs_hw_setmode(bochs, mode, crtc->primary->fb->format);
-       bochs_crtc_mode_set_base(crtc, x, y, old_fb);
-       return 0;
+       bochs_hw_setmode(bochs, &crtc->mode);
 }
 
-static void bochs_crtc_prepare(struct drm_crtc *crtc)
+static void bochs_crtc_atomic_enable(struct drm_crtc *crtc,
+                                    struct drm_crtc_state *old_crtc_state)
 {
 }
 
-static void bochs_crtc_commit(struct drm_crtc *crtc)
+static void bochs_crtc_atomic_flush(struct drm_crtc *crtc,
+                                   struct drm_crtc_state *old_crtc_state)
 {
-}
+       struct drm_device *dev = crtc->dev;
+       struct drm_pending_vblank_event *event;
 
-static int bochs_crtc_page_flip(struct drm_crtc *crtc,
-                               struct drm_framebuffer *fb,
-                               struct drm_pending_vblank_event *event,
-                               uint32_t page_flip_flags,
-                               struct drm_modeset_acquire_ctx *ctx)
-{
-       struct bochs_device *bochs =
-               container_of(crtc, struct bochs_device, crtc);
-       struct drm_framebuffer *old_fb = crtc->primary->fb;
-       unsigned long irqflags;
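+       /* no hardware vblank on this device, so complete any pending
+        * event immediately at flush time
+        */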
+       if (crtc->state && crtc->state->event) {
+               unsigned long irqflags;
 
-       crtc->primary->fb = fb;
-       bochs_crtc_mode_set_base(crtc, 0, 0, old_fb);
-       if (event) {
-               spin_lock_irqsave(&bochs->dev->event_lock, irqflags);
+               spin_lock_irqsave(&dev->event_lock, irqflags);
+               event = crtc->state->event;
+               crtc->state->event = NULL;
                drm_crtc_send_vblank_event(crtc, event);
-               spin_unlock_irqrestore(&bochs->dev->event_lock, irqflags);
+               spin_unlock_irqrestore(&dev->event_lock, irqflags);
        }
-       return 0;
 }
 
+
 /* These provide the minimum set of functions required to handle a CRTC */
 static const struct drm_crtc_funcs bochs_crtc_funcs = {
-       .set_config = drm_crtc_helper_set_config,
+       .set_config = drm_atomic_helper_set_config,
        .destroy = drm_crtc_cleanup,
-       .page_flip = bochs_crtc_page_flip,
+       .page_flip = drm_atomic_helper_page_flip,
+       .reset = drm_atomic_helper_crtc_reset,
+       .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
 };
 
 static const struct drm_crtc_helper_funcs bochs_helper_funcs = {
-       .dpms = bochs_crtc_dpms,
-       .mode_set = bochs_crtc_mode_set,
-       .mode_set_base = bochs_crtc_mode_set_base,
-       .prepare = bochs_crtc_prepare,
-       .commit = bochs_crtc_commit,
+       .mode_set_nofb = bochs_crtc_mode_set_nofb,
+       .atomic_enable = bochs_crtc_atomic_enable,
+       .atomic_flush = bochs_crtc_atomic_flush,
 };
 
 static const uint32_t bochs_formats[] = {
@@ -134,6 +74,59 @@ static const uint32_t bochs_formats[] = {
        DRM_FORMAT_BGRX8888,
 };
 
+static void bochs_plane_atomic_update(struct drm_plane *plane,
+                                     struct drm_plane_state *old_state)
+{
+       struct bochs_device *bochs = plane->dev->dev_private;
+       struct bochs_bo *bo;
+
+       if (!plane->state->fb)
+               return;
+       bo = gem_to_bochs_bo(plane->state->fb->obj[0]);
+       bochs_hw_setbase(bochs,
+                        plane->state->crtc_x,
+                        plane->state->crtc_y,
+                        bo->bo.offset);
+       bochs_hw_setformat(bochs, plane->state->fb->format);
+}
+
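+/* prepare_fb pins the backing object into VRAM for scanout; the matching
+ * cleanup_fb below unpins it once the framebuffer is no longer displayed
+ */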
+static int bochs_plane_prepare_fb(struct drm_plane *plane,
+                               struct drm_plane_state *new_state)
+{
+       struct bochs_bo *bo;
+
+       if (!new_state->fb)
+               return 0;
+       bo = gem_to_bochs_bo(new_state->fb->obj[0]);
+       return bochs_bo_pin(bo, TTM_PL_FLAG_VRAM);
+}
+
+static void bochs_plane_cleanup_fb(struct drm_plane *plane,
+                                  struct drm_plane_state *old_state)
+{
+       struct bochs_bo *bo;
+
+       if (!old_state->fb)
+               return;
+       bo = gem_to_bochs_bo(old_state->fb->obj[0]);
+       bochs_bo_unpin(bo);
+}
+
+static const struct drm_plane_helper_funcs bochs_plane_helper_funcs = {
+       .atomic_update = bochs_plane_atomic_update,
+       .prepare_fb = bochs_plane_prepare_fb,
+       .cleanup_fb = bochs_plane_cleanup_fb,
+};
+
+static const struct drm_plane_funcs bochs_plane_funcs = {
+       .update_plane   = drm_atomic_helper_update_plane,
+       .disable_plane  = drm_atomic_helper_disable_plane,
+       .destroy        = drm_primary_helper_destroy,
+       .reset          = drm_atomic_helper_plane_reset,
+       .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
 static struct drm_plane *bochs_primary_plane(struct drm_device *dev)
 {
        struct drm_plane *primary;
@@ -146,16 +139,17 @@ static struct drm_plane *bochs_primary_plane(struct drm_device *dev)
        }
 
        ret = drm_universal_plane_init(dev, primary, 0,
-                                      &drm_primary_helper_funcs,
+                                      &bochs_plane_funcs,
                                       bochs_formats,
                                       ARRAY_SIZE(bochs_formats),
                                       NULL,
                                       DRM_PLANE_TYPE_PRIMARY, NULL);
        if (ret) {
                kfree(primary);
-               primary = NULL;
+               return NULL;
        }
 
+       drm_plane_helper_add(primary, &bochs_plane_helper_funcs);
        return primary;
 }
 
@@ -170,31 +164,6 @@ static void bochs_crtc_init(struct drm_device *dev)
        drm_crtc_helper_add(crtc, &bochs_helper_funcs);
 }
 
-static void bochs_encoder_mode_set(struct drm_encoder *encoder,
-                                  struct drm_display_mode *mode,
-                                  struct drm_display_mode *adjusted_mode)
-{
-}
-
-static void bochs_encoder_dpms(struct drm_encoder *encoder, int state)
-{
-}
-
-static void bochs_encoder_prepare(struct drm_encoder *encoder)
-{
-}
-
-static void bochs_encoder_commit(struct drm_encoder *encoder)
-{
-}
-
-static const struct drm_encoder_helper_funcs bochs_encoder_helper_funcs = {
-       .dpms = bochs_encoder_dpms,
-       .mode_set = bochs_encoder_mode_set,
-       .prepare = bochs_encoder_prepare,
-       .commit = bochs_encoder_commit,
-};
-
 static const struct drm_encoder_funcs bochs_encoder_encoder_funcs = {
        .destroy = drm_encoder_cleanup,
 };
@@ -207,7 +176,6 @@ static void bochs_encoder_init(struct drm_device *dev)
        encoder->possible_crtcs = 0x1;
        drm_encoder_init(dev, encoder, &bochs_encoder_encoder_funcs,
                         DRM_MODE_ENCODER_DAC, NULL);
-       drm_encoder_helper_add(encoder, &bochs_encoder_helper_funcs);
 }
 
 
@@ -266,6 +234,9 @@ static const struct drm_connector_funcs bochs_connector_connector_funcs = {
        .dpms = drm_helper_connector_dpms,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = drm_connector_cleanup,
+       .reset = drm_atomic_helper_connector_reset,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
 static void bochs_connector_init(struct drm_device *dev)
@@ -287,6 +258,22 @@ static void bochs_connector_init(struct drm_device *dev)
        }
 }
 
+static struct drm_framebuffer *
+bochs_gem_fb_create(struct drm_device *dev, struct drm_file *file,
+                   const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+       if (mode_cmd->pixel_format != DRM_FORMAT_XRGB8888 &&
+           mode_cmd->pixel_format != DRM_FORMAT_BGRX8888)
+               return ERR_PTR(-EINVAL);
+
+       return drm_gem_fb_create(dev, file, mode_cmd);
+}
+
+const struct drm_mode_config_funcs bochs_mode_funcs = {
+       .fb_create = bochs_gem_fb_create,
+       .atomic_check = drm_atomic_helper_check,
+       .atomic_commit = drm_atomic_helper_commit,
+};
 
 int bochs_kms_init(struct bochs_device *bochs)
 {
@@ -309,6 +296,8 @@ int bochs_kms_init(struct bochs_device *bochs)
        drm_connector_attach_encoder(&bochs->connector,
                                          &bochs->encoder);
 
+       drm_mode_config_reset(bochs->dev);
+
        return 0;
 }
 
index 0980411e41bf0d7d61fa173b4199253795aa2966..641a33f134eeb2f5ef85fee1bf54a8ab8219593a 100644 (file)
@@ -210,33 +210,28 @@ static void bochs_ttm_placement(struct bochs_bo *bo, int domain)
        bo->placement.num_busy_placement = c;
 }
 
-static inline u64 bochs_bo_gpu_offset(struct bochs_bo *bo)
-{
-       return bo->bo.offset;
-}
-
-int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr)
+int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag)
 {
        struct ttm_operation_ctx ctx = { false, false };
        int i, ret;
 
        if (bo->pin_count) {
                bo->pin_count++;
-               if (gpu_addr)
-                       *gpu_addr = bochs_bo_gpu_offset(bo);
                return 0;
        }
 
        bochs_ttm_placement(bo, pl_flag);
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
+       ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
+       if (ret)
+               return ret;
        ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
+       ttm_bo_unreserve(&bo->bo);
        if (ret)
                return ret;
 
        bo->pin_count = 1;
-       if (gpu_addr)
-               *gpu_addr = bochs_bo_gpu_offset(bo);
        return 0;
 }
 
@@ -256,7 +251,11 @@ int bochs_bo_unpin(struct bochs_bo *bo)
 
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
+       ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
+       if (ret)
+               return ret;
        ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
+       ttm_bo_unreserve(&bo->bo);
        if (ret)
                return ret;
 
@@ -396,3 +395,52 @@ int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
        drm_gem_object_put_unlocked(obj);
        return 0;
 }
+
+/* ---------------------------------------------------------------------- */
+
+int bochs_gem_prime_pin(struct drm_gem_object *obj)
+{
+       struct bochs_bo *bo = gem_to_bochs_bo(obj);
+
+       return bochs_bo_pin(bo, TTM_PL_FLAG_VRAM);
+}
+
+void bochs_gem_prime_unpin(struct drm_gem_object *obj)
+{
+       struct bochs_bo *bo = gem_to_bochs_bo(obj);
+
+       bochs_bo_unpin(bo);
+}
+
+void *bochs_gem_prime_vmap(struct drm_gem_object *obj)
+{
+       struct bochs_bo *bo = gem_to_bochs_bo(obj);
+       bool is_iomem;
+       int ret;
+
+       ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM);
+       if (ret)
+               return NULL;
+       ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+       if (ret) {
+               bochs_bo_unpin(bo);
+               return NULL;
+       }
+       return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
+}
+
+void bochs_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+       struct bochs_bo *bo = gem_to_bochs_bo(obj);
+
+       ttm_bo_kunmap(&bo->kmap);
+       bochs_bo_unpin(bo);
+}
+
+int bochs_gem_prime_mmap(struct drm_gem_object *obj,
+                        struct vm_area_struct *vma)
+{
+       struct bochs_bo *bo = gem_to_bochs_bo(obj);
+
+       return ttm_fbdev_mmap(vma, &bo->bo);
+}
index 73d8ccb977427a5b995081db91de5126165565db..996a7e7dbfd65e39abaeb521bea2992ed7798af1 100644 (file)
 #include <linux/regmap.h>
 #include <linux/regulator/consumer.h>
 
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_connector.h>
 #include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
 
 #define ADV7511_REG_CHIP_REVISION              0x00
 #define ADV7511_REG_N0                         0x01
@@ -395,7 +397,7 @@ static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
 #ifdef CONFIG_DRM_I2C_ADV7533
 void adv7533_dsi_power_on(struct adv7511 *adv);
 void adv7533_dsi_power_off(struct adv7511 *adv);
-void adv7533_mode_set(struct adv7511 *adv, struct drm_display_mode *mode);
+void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode);
 int adv7533_patch_registers(struct adv7511 *adv);
 int adv7533_patch_cec_registers(struct adv7511 *adv);
 int adv7533_attach_dsi(struct adv7511 *adv);
@@ -411,7 +413,7 @@ static inline void adv7533_dsi_power_off(struct adv7511 *adv)
 }
 
 static inline void adv7533_mode_set(struct adv7511 *adv,
-                                   struct drm_display_mode *mode)
+                                   const struct drm_display_mode *mode)
 {
 }
 
index 85c2d407a52e1a5476b3269d13655606d10478fd..ec2ca71e132321f2c45d6b2e473a75f894059628 100644 (file)
@@ -17,6 +17,7 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_edid.h>
+#include <drm/drm_probe_helper.h>
 
 #include <media/cec.h>
 
@@ -676,8 +677,8 @@ static enum drm_mode_status adv7511_mode_valid(struct adv7511 *adv7511,
 }
 
 static void adv7511_mode_set(struct adv7511 *adv7511,
-                            struct drm_display_mode *mode,
-                            struct drm_display_mode *adj_mode)
+                            const struct drm_display_mode *mode,
+                            const struct drm_display_mode *adj_mode)
 {
        unsigned int low_refresh_rate;
        unsigned int hsync_polarity = 0;
@@ -839,8 +840,8 @@ static void adv7511_bridge_disable(struct drm_bridge *bridge)
 }
 
 static void adv7511_bridge_mode_set(struct drm_bridge *bridge,
-                                   struct drm_display_mode *mode,
-                                   struct drm_display_mode *adj_mode)
+                                   const struct drm_display_mode *mode,
+                                   const struct drm_display_mode *adj_mode)
 {
        struct adv7511 *adv = bridge_to_adv7511(bridge);
 
index 185b6d84216653003bdd0cd26df89bd6e598b3ad..5d5e7d9eded2f4bda06c8baf8ffd4070a5786207 100644 (file)
@@ -108,7 +108,7 @@ void adv7533_dsi_power_off(struct adv7511 *adv)
        regmap_write(adv->regmap_cec, 0x27, 0x0b);
 }
 
-void adv7533_mode_set(struct adv7511 *adv, struct drm_display_mode *mode)
+void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode)
 {
        struct mipi_dsi_device *dsi = adv->dsi;
        int lanes, ret;
index f8433c93f4634620c177c77ac67aea70337288ec..c09aaf93ae1b0ad44b48ad41e3bbc072196b86f7 100644 (file)
@@ -31,9 +31,9 @@
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_dp_helper.h>
 #include <drm/drm_edid.h>
+#include <drm/drm_probe_helper.h>
 
 #include "analogix-anx78xx.h"
 
@@ -1082,8 +1082,8 @@ static void anx78xx_bridge_disable(struct drm_bridge *bridge)
 }
 
 static void anx78xx_bridge_mode_set(struct drm_bridge *bridge,
-                                   struct drm_display_mode *mode,
-                                   struct drm_display_mode *adjusted_mode)
+                               const struct drm_display_mode *mode,
+                               const struct drm_display_mode *adjusted_mode)
 {
        struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
        struct hdmi_avi_infoframe frame;
@@ -1094,8 +1094,9 @@ static void anx78xx_bridge_mode_set(struct drm_bridge *bridge,
 
        mutex_lock(&anx78xx->lock);
 
-       err = drm_hdmi_avi_infoframe_from_display_mode(&frame, adjusted_mode,
-                                                      false);
+       err = drm_hdmi_avi_infoframe_from_display_mode(&frame,
+                                                      &anx78xx->connector,
+                                                      adjusted_mode);
        if (err) {
                DRM_ERROR("Failed to setup AVI infoframe: %d\n", err);
                goto unlock;
index 753e96129ab7a63355d3d50e322780f563e76684..225f5e5dd69b8b525fb73f2f356b54f65b85bf0e 100644 (file)
@@ -26,8 +26,8 @@
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
 
 #include <drm/bridge/analogix_dp.h>
 
@@ -1361,8 +1361,8 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
 }
 
 static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge,
-                                       struct drm_display_mode *orig_mode,
-                                       struct drm_display_mode *mode)
+                               const struct drm_display_mode *orig_mode,
+                               const struct drm_display_mode *mode)
 {
        struct analogix_dp_device *dp = bridge->driver_private;
        struct drm_display_info *display_info = &dp->connector.display_info;
index ce9496d13986937f9de7a0cbd4d146b9959d3615..924abe82ea3c3df7cf20a702e57c68d5e82efb57 100644 (file)
@@ -7,12 +7,14 @@
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_bridge.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
 #include <drm/drm_mipi_dsi.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
 #include <video/mipi_display.h>
 
 #include <linux/clk.h>
+#include <linux/interrupt.h>
 #include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/of_address.h>
index 9b706789a3417615fa74118186318f514376da80..0805801f4e94c9becb7e360090602a07094e7728 100644 (file)
@@ -18,7 +18,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 
 struct dumb_vga {
        struct drm_bridge       bridge;
index f56c92f7af7c484b90fcbac98b9177e36177b949..ae8fc597eb381b2551e8beeebdf8eb32a5e2098d 100644 (file)
 #include <drm/drm_bridge.h>
 #include <drm/drm_panel.h>
 
+#include <linux/gpio/consumer.h>
 #include <linux/of_graph.h>
 
 struct lvds_encoder {
        struct drm_bridge bridge;
        struct drm_bridge *panel_bridge;
+       struct gpio_desc *powerdown_gpio;
 };
 
 static int lvds_encoder_attach(struct drm_bridge *bridge)
@@ -28,54 +30,85 @@ static int lvds_encoder_attach(struct drm_bridge *bridge)
                                 bridge);
 }
 
+static void lvds_encoder_enable(struct drm_bridge *bridge)
+{
+       struct lvds_encoder *lvds_encoder = container_of(bridge,
+                                                        struct lvds_encoder,
+                                                        bridge);
+
+       if (lvds_encoder->powerdown_gpio)
+               gpiod_set_value_cansleep(lvds_encoder->powerdown_gpio, 0);
+}
+
+static void lvds_encoder_disable(struct drm_bridge *bridge)
+{
+       struct lvds_encoder *lvds_encoder = container_of(bridge,
+                                                        struct lvds_encoder,
+                                                        bridge);
+
+       if (lvds_encoder->powerdown_gpio)
+               gpiod_set_value_cansleep(lvds_encoder->powerdown_gpio, 1);
+}
+
 static struct drm_bridge_funcs funcs = {
        .attach = lvds_encoder_attach,
+       .enable = lvds_encoder_enable,
+       .disable = lvds_encoder_disable,
 };
 
 static int lvds_encoder_probe(struct platform_device *pdev)
 {
+       struct device *dev = &pdev->dev;
        struct device_node *port;
        struct device_node *endpoint;
        struct device_node *panel_node;
        struct drm_panel *panel;
        struct lvds_encoder *lvds_encoder;
 
-       lvds_encoder = devm_kzalloc(&pdev->dev, sizeof(*lvds_encoder),
-                                   GFP_KERNEL);
+       lvds_encoder = devm_kzalloc(dev, sizeof(*lvds_encoder), GFP_KERNEL);
        if (!lvds_encoder)
                return -ENOMEM;
 
+       lvds_encoder->powerdown_gpio = devm_gpiod_get_optional(dev, "powerdown",
+                                                              GPIOD_OUT_HIGH);
+       if (IS_ERR(lvds_encoder->powerdown_gpio)) {
+               int err = PTR_ERR(lvds_encoder->powerdown_gpio);
+
+               if (err != -EPROBE_DEFER)
+                       dev_err(dev, "powerdown GPIO failure: %d\n", err);
+               return err;
+       }
+
        /* Locate the panel DT node. */
-       port = of_graph_get_port_by_id(pdev->dev.of_node, 1);
+       port = of_graph_get_port_by_id(dev->of_node, 1);
        if (!port) {
-               dev_dbg(&pdev->dev, "port 1 not found\n");
+               dev_dbg(dev, "port 1 not found\n");
                return -ENXIO;
        }
 
        endpoint = of_get_child_by_name(port, "endpoint");
        of_node_put(port);
        if (!endpoint) {
-               dev_dbg(&pdev->dev, "no endpoint for port 1\n");
+               dev_dbg(dev, "no endpoint for port 1\n");
                return -ENXIO;
        }
 
        panel_node = of_graph_get_remote_port_parent(endpoint);
        of_node_put(endpoint);
        if (!panel_node) {
-               dev_dbg(&pdev->dev, "no remote endpoint for port 1\n");
+               dev_dbg(dev, "no remote endpoint for port 1\n");
                return -ENXIO;
        }
 
        panel = of_drm_find_panel(panel_node);
        of_node_put(panel_node);
        if (IS_ERR(panel)) {
-               dev_dbg(&pdev->dev, "panel not found, deferring probe\n");
+               dev_dbg(dev, "panel not found, deferring probe\n");
                return PTR_ERR(panel);
        }
 
        lvds_encoder->panel_bridge =
-               devm_drm_panel_bridge_add(&pdev->dev,
-                                         panel, DRM_MODE_CONNECTOR_LVDS);
+               devm_drm_panel_bridge_add(dev, panel, DRM_MODE_CONNECTOR_LVDS);
        if (IS_ERR(lvds_encoder->panel_bridge))
                return PTR_ERR(lvds_encoder->panel_bridge);
 
@@ -83,7 +116,7 @@ static int lvds_encoder_probe(struct platform_device *pdev)
         * but we need a bridge attached to our of_node for our user
         * to look up.
         */
-       lvds_encoder->bridge.of_node = pdev->dev.of_node;
+       lvds_encoder->bridge.of_node = dev->of_node;
        lvds_encoder->bridge.funcs = &funcs;
        drm_bridge_add(&lvds_encoder->bridge);
 
index 2136c97aeb8ec9463ac1d79766448402b9664504..a01028ec4de658cbec0a2069035a763559df08e0 100644 (file)
@@ -36,8 +36,8 @@
 #include <linux/of.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drmP.h>
 
 #define EDID_EXT_BLOCK_CNT 0x7E
index a3e817abace101fecc6638b7aa58af2714dbcea3..fb335afea4cf2a76fab143166bb78080085b5500 100644 (file)
 #include <linux/of_gpio.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drmP.h>
 
 #define PTN3460_EDID_ADDR                      0x0
index 7cbaba213ef693d11c430533df45c838a56c1229..38eeaf8ba95961bebf01400a0841c31af09a717d 100644 (file)
@@ -12,9 +12,9 @@
 #include <drm/drm_panel.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_connector.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drm_panel.h>
 
 struct panel_bridge {
@@ -134,8 +134,8 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
 };
 
 /**
- * drm_panel_bridge_add - Creates a drm_bridge and drm_connector that
- * just calls the appropriate functions from drm_panel.
+ * drm_panel_bridge_add - Creates a &drm_bridge and &drm_connector that
+ * just calls the appropriate functions from &drm_panel.
  *
  * @panel: The drm_panel being wrapped.  Must be non-NULL.
  * @connector_type: The DRM_MODE_CONNECTOR_* for the connector to be
@@ -149,9 +149,12 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
  * passed to drm_bridge_attach().  The drm_panel_prepare() and related
  * functions can be dropped from the encoder driver (they're now
  * called by the KMS helpers before calling into the encoder), along
- * with connector creation.  When done with the bridge,
- * drm_bridge_detach() should be called as normal, then
+ * with connector creation.  When done with the bridge (after
+ * drm_mode_config_cleanup() if the bridge has already been attached), call
  * drm_panel_bridge_remove() to free it.
+ *
+ * See devm_drm_panel_bridge_add() for an automatically managed version of this
+ * function.
  */
 struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel,
                                        u32 connector_type)
@@ -210,6 +213,17 @@ static void devm_drm_panel_bridge_release(struct device *dev, void *res)
        drm_panel_bridge_remove(*bridge);
 }
 
+/**
+ * devm_drm_panel_bridge_add - Creates a managed &drm_bridge and &drm_connector
+ * that just calls the appropriate functions from &drm_panel.
+ * @dev: device to tie the bridge lifetime to
+ * @panel: The drm_panel being wrapped.  Must be non-NULL.
+ * @connector_type: The DRM_MODE_CONNECTOR_* for the connector to be
+ * created.
+ *
+ * This is the managed version of drm_panel_bridge_add() which automatically
+ * calls drm_panel_bridge_remove() when @dev is unbound.
+ */
 struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev,
                                             struct drm_panel *panel,
                                             u32 connector_type)
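
For reference, a minimal sketch of how an encoder driver might consume the
managed helper above; the foo_* names are hypothetical and not part of this
patch:

static int foo_encoder_bind(struct device *dev, struct foo_encoder *foo)
{
        struct drm_bridge *bridge;

        /* The panel bridge is torn down automatically when dev is unbound. */
        bridge = devm_drm_panel_bridge_add(dev, foo->panel,
                                           DRM_MODE_CONNECTOR_LVDS);
        if (IS_ERR(bridge))
                return PTR_ERR(bridge);

        /* Chain the panel bridge behind this driver's encoder. */
        return drm_bridge_attach(&foo->encoder, bridge, NULL);
}
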
index 7334d1b62b71f800e7a7fbc0e4b121e0df2bacc2..fda1395b74815a93a5866edb4f1cda6e0696a03a 100644 (file)
@@ -26,9 +26,9 @@
 #include <linux/regulator/consumer.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drmP.h>
 
 /* Brightness scale on the Parade chip */
index bfa902013aa42e06f3eff686d25ba593cd9b2b89..08e12fef1349bd949b28018275312341c99a9bbe 100644 (file)
@@ -30,8 +30,8 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
+#include <drm/drm_probe_helper.h>
 
 #define SII902X_TPI_VIDEO_DATA                 0x0
 
@@ -232,8 +232,8 @@ static void sii902x_bridge_enable(struct drm_bridge *bridge)
 }
 
 static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
-                                   struct drm_display_mode *mode,
-                                   struct drm_display_mode *adj)
+                                   const struct drm_display_mode *mode,
+                                   const struct drm_display_mode *adj)
 {
        struct sii902x *sii902x = bridge_to_sii902x(bridge);
        struct regmap *regmap = sii902x->regmap;
@@ -258,7 +258,8 @@ static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
        if (ret)
                return;
 
-       ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, adj, false);
+       ret = drm_hdmi_avi_infoframe_from_display_mode(&frame,
+                                                      &sii902x->connector, adj);
        if (ret < 0) {
                DRM_ERROR("couldn't fill AVI infoframe\n");
                return;
index a6e8f4591e636241c6f1e8515fea33dc9147a7f3..0cc293a6ac246b391e11d6366a157d4326c91668 100644 (file)
@@ -1104,8 +1104,7 @@ static void sii8620_set_infoframes(struct sii8620 *ctx,
        int ret;
 
        ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi,
-                                                      mode,
-                                                      true);
+                                                      NULL, mode);
        if (ctx->use_packed_pixel)
                frm.avi.colorspace = HDMI_COLORSPACE_YUV422;
 
index 2228689d9a5e6ac743f4418e241f2873aabe62f2..5cbb71a866d54bf7a10ce8943ff0b3c364f65d53 100644 (file)
@@ -5,6 +5,10 @@
  * Copyright (c) 2017 Renesas Solutions Corp.
  * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
  */
+
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+
 #include <drm/bridge/dw_hdmi.h>
 
 #include <sound/hdmi-codec.h>
index 64c3cf0275182649d9a5572ce6f3d907165f822a..a63e5f0dae56ad3de5a372ecd167a350d5b89457 100644 (file)
 #include <drm/drm_of.h>
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_encoder_slave.h>
+#include <drm/drm_scdc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/bridge/dw_hdmi.h>
 
 #include <uapi/linux/media-bus-format.h>
 
 #define HDMI_EDID_LEN          512
 
+/* DW-HDMI Controllers >= 0x200a are at least compliant with SCDC version 1 */
+#define SCDC_MIN_SOURCE_VERSION        0x1
+
+#define HDMI14_MAX_TMDSCLK     340000000
+
 enum hdmi_datamap {
        RGB444_8B = 0x01,
        RGB444_10B = 0x03,
@@ -93,6 +99,7 @@ struct hdmi_vmode {
        unsigned int mpixelclock;
        unsigned int mpixelrepetitioninput;
        unsigned int mpixelrepetitionoutput;
+       unsigned int mtmdsclock;
 };
 
 struct hdmi_data_info {
@@ -537,7 +544,7 @@ static void hdmi_init_clk_regenerator(struct dw_hdmi *hdmi)
 static void hdmi_clk_regenerator_update_pixel_clock(struct dw_hdmi *hdmi)
 {
        mutex_lock(&hdmi->audio_mutex);
-       hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock,
+       hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mtmdsclock,
                                 hdmi->sample_rate);
        mutex_unlock(&hdmi->audio_mutex);
 }
@@ -546,7 +553,7 @@ void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate)
 {
        mutex_lock(&hdmi->audio_mutex);
        hdmi->sample_rate = rate;
-       hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock,
+       hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mtmdsclock,
                                 hdmi->sample_rate);
        mutex_unlock(&hdmi->audio_mutex);
 }
@@ -647,6 +654,20 @@ static bool hdmi_bus_fmt_is_yuv422(unsigned int bus_format)
        }
 }
 
+static bool hdmi_bus_fmt_is_yuv420(unsigned int bus_format)
+{
+       switch (bus_format) {
+       case MEDIA_BUS_FMT_UYYVYY8_0_5X24:
+       case MEDIA_BUS_FMT_UYYVYY10_0_5X30:
+       case MEDIA_BUS_FMT_UYYVYY12_0_5X36:
+       case MEDIA_BUS_FMT_UYYVYY16_0_5X48:
+               return true;
+
+       default:
+               return false;
+       }
+}
+
 static int hdmi_bus_fmt_color_depth(unsigned int bus_format)
 {
        switch (bus_format) {
@@ -876,7 +897,8 @@ static void hdmi_video_packetize(struct dw_hdmi *hdmi)
        u8 val, vp_conf;
 
        if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format) ||
-           hdmi_bus_fmt_is_yuv444(hdmi->hdmi_data.enc_out_bus_format)) {
+           hdmi_bus_fmt_is_yuv444(hdmi->hdmi_data.enc_out_bus_format) ||
+           hdmi_bus_fmt_is_yuv420(hdmi->hdmi_data.enc_out_bus_format)) {
                switch (hdmi_bus_fmt_color_depth(
                                        hdmi->hdmi_data.enc_out_bus_format)) {
                case 8:
@@ -1015,6 +1037,33 @@ void dw_hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data,
 }
 EXPORT_SYMBOL_GPL(dw_hdmi_phy_i2c_write);
 
+/*
+ * The HDMI 2.0 spec specifies the following procedure for High TMDS Bit Rates:
+ * - The Source shall suspend transmission of the TMDS clock and data
+ * - The Source shall write to the TMDS_Bit_Clock_Ratio bit to change it
+ * from a 0 to a 1 or from a 1 to a 0
+ * - The Source shall allow a minimum of 1 ms and a maximum of 100 ms from
+ * the time the TMDS_Bit_Clock_Ratio bit is written until resuming
+ * transmission of TMDS clock and data
+ *
+ * To respect the 100 ms maximum delay, the dw_hdmi_set_high_tmds_clock_ratio()
+ * helper should be called right before enabling the TMDS Clock and Data in
+ * the PHY configuration callback.
+ */
+void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi)
+{
+       unsigned long mtmdsclock = hdmi->hdmi_data.video_mode.mtmdsclock;
+
+       /* Control for TMDS Bit Period/TMDS Clock-Period Ratio */
+       if (hdmi->connector.display_info.hdmi.scdc.supported) {
+               if (mtmdsclock > HDMI14_MAX_TMDSCLK)
+                       drm_scdc_set_high_tmds_clock_ratio(hdmi->ddc, 1);
+               else
+                       drm_scdc_set_high_tmds_clock_ratio(hdmi->ddc, 0);
+       }
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_set_high_tmds_clock_ratio);
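
A sketch of the call site the comment above describes; only
dw_hdmi_set_high_tmds_clock_ratio() is real here, the surrounding callback is
hypothetical:

static int foo_hdmi_phy_configure(struct dw_hdmi *hdmi)
{
        /* TMDS clock and data transmission is still suspended at this point. */
        dw_hdmi_set_high_tmds_clock_ratio(hdmi);

        /* ... vendor-specific PLL and driver programming ... */

        /*
         * Resume TMDS output last; the generic hdmi_phy_configure() path
         * already covers the mandated wait (see the msleep(100) hunk below).
         */
        return 0;
}
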
+
 static void dw_hdmi_phy_enable_powerdown(struct dw_hdmi *hdmi, bool enable)
 {
        hdmi_mask_writeb(hdmi, !enable, HDMI_PHY_CONF0,
@@ -1165,6 +1214,8 @@ static int hdmi_phy_configure_dwc_hdmi_3d_tx(struct dw_hdmi *hdmi,
        const struct dw_hdmi_curr_ctrl *curr_ctrl = pdata->cur_ctr;
        const struct dw_hdmi_phy_config *phy_config = pdata->phy_config;
 
+       /* TOFIX Will need 420 specific PHY configuration tables */
+
        /* PLL/MPLL Cfg - always match on final entry */
        for (; mpll_config->mpixelclock != ~0UL; mpll_config++)
                if (mpixelclock <= mpll_config->mpixelclock)
@@ -1212,10 +1263,13 @@ static int hdmi_phy_configure(struct dw_hdmi *hdmi)
        const struct dw_hdmi_phy_data *phy = hdmi->phy.data;
        const struct dw_hdmi_plat_data *pdata = hdmi->plat_data;
        unsigned long mpixelclock = hdmi->hdmi_data.video_mode.mpixelclock;
+       unsigned long mtmdsclock = hdmi->hdmi_data.video_mode.mtmdsclock;
        int ret;
 
        dw_hdmi_phy_power_off(hdmi);
 
+       dw_hdmi_set_high_tmds_clock_ratio(hdmi);
+
        /* Leave low power consumption mode by asserting SVSRET. */
        if (phy->has_svsret)
                dw_hdmi_phy_enable_svsret(hdmi, 1);
@@ -1237,6 +1291,10 @@ static int hdmi_phy_configure(struct dw_hdmi *hdmi)
                return ret;
        }
 
+       /* Wait for resuming transmission of TMDS clock and data */
+       if (mtmdsclock > HDMI14_MAX_TMDSCLK)
+               msleep(100);
+
        return dw_hdmi_phy_power_on(hdmi);
 }
 
@@ -1344,12 +1402,15 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
        u8 val;
 
        /* Initialise info frame from DRM mode */
-       drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
+       drm_hdmi_avi_infoframe_from_display_mode(&frame,
+                                                &hdmi->connector, mode);
 
        if (hdmi_bus_fmt_is_yuv444(hdmi->hdmi_data.enc_out_bus_format))
                frame.colorspace = HDMI_COLORSPACE_YUV444;
        else if (hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_out_bus_format))
                frame.colorspace = HDMI_COLORSPACE_YUV422;
+       else if (hdmi_bus_fmt_is_yuv420(hdmi->hdmi_data.enc_out_bus_format))
+               frame.colorspace = HDMI_COLORSPACE_YUV420;
        else
                frame.colorspace = HDMI_COLORSPACE_RGB;
 
@@ -1503,17 +1564,23 @@ static void hdmi_config_vendor_specific_infoframe(struct dw_hdmi *hdmi,
 static void hdmi_av_composer(struct dw_hdmi *hdmi,
                             const struct drm_display_mode *mode)
 {
-       u8 inv_val;
+       u8 inv_val, bytes;
+       struct drm_hdmi_info *hdmi_info = &hdmi->connector.display_info.hdmi;
        struct hdmi_vmode *vmode = &hdmi->hdmi_data.video_mode;
        int hblank, vblank, h_de_hs, v_de_vs, hsync_len, vsync_len;
-       unsigned int vdisplay;
+       unsigned int vdisplay, hdisplay;
 
-       vmode->mpixelclock = mode->clock * 1000;
+       vmode->mtmdsclock = vmode->mpixelclock = mode->clock * 1000;
 
        dev_dbg(hdmi->dev, "final pixclk = %d\n", vmode->mpixelclock);
 
+       if (hdmi_bus_fmt_is_yuv420(hdmi->hdmi_data.enc_out_bus_format))
+               vmode->mtmdsclock /= 2;
+
        /* Set up HDMI_FC_INVIDCONF */
-       inv_val = (hdmi->hdmi_data.hdcp_enable ?
+       inv_val = (hdmi->hdmi_data.hdcp_enable ||
+                  vmode->mtmdsclock > HDMI14_MAX_TMDSCLK ||
+                  hdmi_info->scdc.scrambling.low_rates ?
                HDMI_FC_INVIDCONF_HDCP_KEEPOUT_ACTIVE :
                HDMI_FC_INVIDCONF_HDCP_KEEPOUT_INACTIVE);
 
@@ -1546,6 +1613,22 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
 
        hdmi_writeb(hdmi, inv_val, HDMI_FC_INVIDCONF);
 
+       hdisplay = mode->hdisplay;
+       hblank = mode->htotal - mode->hdisplay;
+       h_de_hs = mode->hsync_start - mode->hdisplay;
+       hsync_len = mode->hsync_end - mode->hsync_start;
+
+       /*
+        * When we're setting a YCbCr420 mode, we need
+        * to adjust the horizontal timing to suit.
+        */
+       if (hdmi_bus_fmt_is_yuv420(hdmi->hdmi_data.enc_out_bus_format)) {
+               hdisplay /= 2;
+               hblank /= 2;
+               h_de_hs /= 2;
+               hsync_len /= 2;
+       }
+
        vdisplay = mode->vdisplay;
        vblank = mode->vtotal - mode->vdisplay;
        v_de_vs = mode->vsync_start - mode->vdisplay;
@@ -1562,16 +1645,54 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
                vsync_len /= 2;
        }
 
+       /* Scrambling Control */
+       if (hdmi_info->scdc.supported) {
+               if (vmode->mtmdsclock > HDMI14_MAX_TMDSCLK ||
+                   hdmi_info->scdc.scrambling.low_rates) {
+                       /*
+                        * The HDMI 2.0 spec specifies the following procedure:
+                        * after the Source Device has determined that
+                        * SCDC_Present is set (=1), the Source Device should
+                        * write the accurate Version of the Source Device
+                        * to the Source Version field in the SCDCS.
+                        * Compliant Source Devices shall set the
+                        * Source Version = 1.
+                        */
+                       drm_scdc_readb(&hdmi->i2c->adap, SCDC_SINK_VERSION,
+                                      &bytes);
+                       drm_scdc_writeb(&hdmi->i2c->adap, SCDC_SOURCE_VERSION,
+                               min_t(u8, bytes, SCDC_MIN_SOURCE_VERSION));
+
+                       /* Enable Scrambling in the Sink */
+                       drm_scdc_set_scrambling(&hdmi->i2c->adap, 1);
+
+                       /*
+                        * To activate the scrambler feature, you must ensure
+                        * that the quasi-static configuration bit
+                        * fc_invidconf.HDCP_keepout is set at configuration
+                        * time, before the required mc_swrstzreq.tmdsswrst_req
+                        * reset request is issued.
+                        */
+                       hdmi_writeb(hdmi, (u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ,
+                                   HDMI_MC_SWRSTZ);
+                       hdmi_writeb(hdmi, 1, HDMI_FC_SCRAMBLER_CTRL);
+               } else {
+                       hdmi_writeb(hdmi, 0, HDMI_FC_SCRAMBLER_CTRL);
+                       hdmi_writeb(hdmi, (u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ,
+                                   HDMI_MC_SWRSTZ);
+                       drm_scdc_set_scrambling(&hdmi->i2c->adap, 0);
+               }
+       }
+
        /* Set up horizontal active pixel width */
-       hdmi_writeb(hdmi, mode->hdisplay >> 8, HDMI_FC_INHACTV1);
-       hdmi_writeb(hdmi, mode->hdisplay, HDMI_FC_INHACTV0);
+       hdmi_writeb(hdmi, hdisplay >> 8, HDMI_FC_INHACTV1);
+       hdmi_writeb(hdmi, hdisplay, HDMI_FC_INHACTV0);
 
        /* Set up vertical active lines */
        hdmi_writeb(hdmi, vdisplay >> 8, HDMI_FC_INVACTV1);
        hdmi_writeb(hdmi, vdisplay, HDMI_FC_INVACTV0);
 
        /* Set up horizontal blanking pixel region width */
-       hblank = mode->htotal - mode->hdisplay;
        hdmi_writeb(hdmi, hblank >> 8, HDMI_FC_INHBLANK1);
        hdmi_writeb(hdmi, hblank, HDMI_FC_INHBLANK0);
 
@@ -1579,7 +1700,6 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
        hdmi_writeb(hdmi, vblank, HDMI_FC_INVBLANK);
 
        /* Set up HSYNC active edge delay width (in pixel clks) */
-       h_de_hs = mode->hsync_start - mode->hdisplay;
        hdmi_writeb(hdmi, h_de_hs >> 8, HDMI_FC_HSYNCINDELAY1);
        hdmi_writeb(hdmi, h_de_hs, HDMI_FC_HSYNCINDELAY0);
 
@@ -1587,7 +1707,6 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
        hdmi_writeb(hdmi, v_de_vs, HDMI_FC_VSYNCINDELAY);
 
        /* Set up HSYNC active pulse width (in pixel clks) */
-       hsync_len = mode->hsync_end - mode->hsync_start;
        hdmi_writeb(hdmi, hsync_len >> 8, HDMI_FC_HSYNCINWIDTH1);
        hdmi_writeb(hdmi, hsync_len, HDMI_FC_HSYNCINWIDTH0);
 
@@ -1998,8 +2117,8 @@ dw_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
 }
 
 static void dw_hdmi_bridge_mode_set(struct drm_bridge *bridge,
-                                   struct drm_display_mode *orig_mode,
-                                   struct drm_display_mode *mode)
+                                   const struct drm_display_mode *orig_mode,
+                                   const struct drm_display_mode *mode)
 {
        struct dw_hdmi *hdmi = bridge->driver_private;
 
index 9d90eb9c46e5910165c6e36b39c62e5ce9150c2e..3f3c616eba97b6df558c018ff76d1e4a6ef70ce9 100644 (file)
 #define HDMI_FC_MASK2                           0x10DA
 #define HDMI_FC_POL2                            0x10DB
 #define HDMI_FC_PRCONF                          0x10E0
+#define HDMI_FC_SCRAMBLER_CTRL                  0x10E1
 
 #define HDMI_FC_GMD_STAT                        0x1100
 #define HDMI_FC_GMD_EN                          0x1101
index 2f4b145b73af23d7db14845a111c5fc9d4f27736..e915ae8c9a9237c37ca7db4a8aa058af9a9a954a 100644 (file)
@@ -19,9 +19,9 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_bridge.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_mipi_dsi.h>
 #include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/bridge/dw_mipi_dsi.h>
 #include <video/mipi_display.h>
 
@@ -248,7 +248,7 @@ static inline bool dw_mipi_is_dual_mode(struct dw_mipi_dsi *dsi)
  * The controller should generate 2 frames before
  * preparing the peripheral.
  */
-static void dw_mipi_dsi_wait_for_two_frames(struct drm_display_mode *mode)
+static void dw_mipi_dsi_wait_for_two_frames(const struct drm_display_mode *mode)
 {
        int refresh, two_frames;
 
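
The body of dw_mipi_dsi_wait_for_two_frames() is not visible in this hunk; a
plausible reconstruction, assuming it simply sleeps for two frame periods at
the mode's refresh rate:

static void dw_mipi_dsi_wait_for_two_frames(const struct drm_display_mode *mode)
{
        int refresh, two_frames;

        /* Two full frames, rounded up to whole milliseconds. */
        refresh = drm_mode_vrefresh(mode);
        two_frames = DIV_ROUND_UP(MSEC_PER_SEC, refresh) * 2;
        msleep(two_frames);
}
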
@@ -564,7 +564,7 @@ static void dw_mipi_dsi_init(struct dw_mipi_dsi *dsi)
 }
 
 static void dw_mipi_dsi_dpi_config(struct dw_mipi_dsi *dsi,
-                                  struct drm_display_mode *mode)
+                                  const struct drm_display_mode *mode)
 {
        u32 val = 0, color = 0;
 
@@ -607,7 +607,7 @@ static void dw_mipi_dsi_packet_handler_config(struct dw_mipi_dsi *dsi)
 }
 
 static void dw_mipi_dsi_video_packet_config(struct dw_mipi_dsi *dsi,
-                                           struct drm_display_mode *mode)
+                                           const struct drm_display_mode *mode)
 {
        /*
         * TODO dw drv improvements
@@ -642,7 +642,7 @@ static void dw_mipi_dsi_command_mode_config(struct dw_mipi_dsi *dsi)
 
 /* Get lane byte clock cycles. */
 static u32 dw_mipi_dsi_get_hcomponent_lbcc(struct dw_mipi_dsi *dsi,
-                                          struct drm_display_mode *mode,
+                                          const struct drm_display_mode *mode,
                                           u32 hcomponent)
 {
        u32 frac, lbcc;
@@ -658,7 +658,7 @@ static u32 dw_mipi_dsi_get_hcomponent_lbcc(struct dw_mipi_dsi *dsi,
 }
 
 static void dw_mipi_dsi_line_timer_config(struct dw_mipi_dsi *dsi,
-                                         struct drm_display_mode *mode)
+                                         const struct drm_display_mode *mode)
 {
        u32 htotal, hsa, hbp, lbcc;
 
@@ -681,7 +681,7 @@ static void dw_mipi_dsi_line_timer_config(struct dw_mipi_dsi *dsi,
 }
 
 static void dw_mipi_dsi_vertical_timing_config(struct dw_mipi_dsi *dsi,
-                                              struct drm_display_mode *mode)
+                                       const struct drm_display_mode *mode)
 {
        u32 vactive, vsa, vfp, vbp;
 
@@ -818,7 +818,7 @@ static unsigned int dw_mipi_dsi_get_lanes(struct dw_mipi_dsi *dsi)
 }
 
 static void dw_mipi_dsi_mode_set(struct dw_mipi_dsi *dsi,
-                               struct drm_display_mode *adjusted_mode)
+                                const struct drm_display_mode *adjusted_mode)
 {
        const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops;
        void *priv_data = dsi->plat_data->priv_data;
@@ -861,8 +861,8 @@ static void dw_mipi_dsi_mode_set(struct dw_mipi_dsi *dsi,
 }
 
 static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
-                                       struct drm_display_mode *mode,
-                                       struct drm_display_mode *adjusted_mode)
+                                       const struct drm_display_mode *mode,
+                                       const struct drm_display_mode *adjusted_mode)
 {
        struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
 
index afd491018bfc55817511ea1d7db9e6fc5f19a99b..a20e454ddd64a98f68804ba4d8b48fd4a1969a62 100644 (file)
@@ -9,11 +9,11 @@
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_mipi_dsi.h>
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drmP.h>
 #include <linux/gpio/consumer.h>
 #include <linux/of_graph.h>
index 8e28e738cb52dec6ee8ea7eda2d655fc7035be93..bbc17530c23dad0e19692dc0aad00f5eff6d6da9 100644 (file)
 #include <linux/slab.h>
 
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_dp_helper.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
 
 /* Registers */
 
@@ -203,7 +203,7 @@ struct tc_data {
        /* display edid */
        struct edid             *edid;
        /* current mode */
-       struct drm_display_mode *mode;
+       const struct drm_display_mode   *mode;
 
        u32                     rev;
        u8                      assr;
@@ -648,7 +648,8 @@ err_dpcd_read:
        return ret;
 }
 
-static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
+static int tc_set_video_mode(struct tc_data *tc,
+                            const struct drm_display_mode *mode)
 {
        int ret;
        int vid_sync_dly;
@@ -1113,8 +1114,8 @@ static enum drm_mode_status tc_connector_mode_valid(struct drm_connector *connec
 }
 
 static void tc_bridge_mode_set(struct drm_bridge *bridge,
-                              struct drm_display_mode *mode,
-                              struct drm_display_mode *adj)
+                              const struct drm_display_mode *mode,
+                              const struct drm_display_mode *adj)
 {
        struct tc_data *tc = bridge_to_tc(bridge);
 
index 10243965ee7c0219737cb6a4e3fa85b728cd1fe4..f72ee137e5f1f87f93cf70275af2c6bd22713d27 100644 (file)
@@ -6,11 +6,11 @@
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_dp_helper.h>
 #include <drm/drm_mipi_dsi.h>
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
 #include <linux/clk.h>
 #include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
index c3e32138c6bb08c5cdb6c75e7d6624984914e0c0..7bfb4f3388134cafa34e35d0b680bf8beb574a1b 100644 (file)
@@ -20,7 +20,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #define HOTPLUG_DEBOUNCE_MS            1100
 
index db40b77c7f7c909670f51c21bac011663f0145a2..8ec880f3a322adaee3d4f82ef028667615242007 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/console.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "cirrus_drv.h"
 
index 4dd499c7d1ba13fc2cb49e91fc011ece76b07f16..39df62acac69d3c200910fd973e9933cde3b4b8d 100644 (file)
@@ -10,6 +10,7 @@
  */
 #include <linux/module.h>
 #include <drm/drmP.h>
+#include <drm/drm_util.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_crtc_helper.h>
 
@@ -256,6 +257,8 @@ static int cirrus_fbdev_destroy(struct drm_device *dev,
 {
        struct drm_framebuffer *gfb = gfbdev->gfb;
 
+       drm_helper_force_disable_all(dev);
+
        drm_fb_helper_unregister_fbi(&gfbdev->helper);
 
        vfree(gfbdev->sysram);
index ed7dcf212a34171f13f73f48c499fde9bb4e66c5..a830e70fc0bbffc00c66903ed7b36d085aa4d2c7 100644 (file)
@@ -17,6 +17,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include <video/cirrus.h>
 
index 54e2ae614dccb4e94187df67abfebf776962f2a6..6fe2303fccd99f17b11e262a6e2a3d1151c929a8 100644 (file)
@@ -29,7 +29,6 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_uapi.h>
 #include <drm/drm_plane_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_writeback.h>
 #include <drm/drm_damage_helper.h>
index ba7025041e4641ea16858497fde02dee9dd3c8f4..138b2711d389ebc0b49f2a2a7aab1e012e722347 100644 (file)
@@ -294,8 +294,8 @@ EXPORT_SYMBOL(drm_bridge_post_disable);
  * Note: the bridge passed should be the one closest to the encoder
  */
 void drm_bridge_mode_set(struct drm_bridge *bridge,
-                       struct drm_display_mode *mode,
-                       struct drm_display_mode *adjusted_mode)
+                        const struct drm_display_mode *mode,
+                        const struct drm_display_mode *adjusted_mode)
 {
        if (!bridge)
                return;
index 07dcf47daafe2befc0be3652fbc0e484a69a549e..d5d34d0c79c757fd594cd7755863e07a407e4e8f 100644 (file)
@@ -462,3 +462,46 @@ int drm_plane_create_color_properties(struct drm_plane *plane,
        return 0;
 }
 EXPORT_SYMBOL(drm_plane_create_color_properties);
+
+/**
+ * drm_color_lut_check - check validity of lookup table
+ * @lut: property blob containing LUT to check
+ * @tests: bitmask of tests to run
+ *
+ * Helper to check whether a userspace-provided lookup table is valid and
+ * satisfies hardware requirements.  Drivers pass a bitmask indicating which of
+ * the tests in &drm_color_lut_tests should be performed.
+ *
+ * Returns 0 on success, -EINVAL on failure.
+ */
+int drm_color_lut_check(const struct drm_property_blob *lut, u32 tests)
+{
+       const struct drm_color_lut *entry;
+       int i;
+
+       if (!lut || !tests)
+               return 0;
+
+       entry = lut->data;
+       for (i = 0; i < drm_color_lut_size(lut); i++) {
+               if (tests & DRM_COLOR_LUT_EQUAL_CHANNELS) {
+                       if (entry[i].red != entry[i].blue ||
+                           entry[i].red != entry[i].green) {
+                               DRM_DEBUG_KMS("All LUT entries must have equal r/g/b\n");
+                               return -EINVAL;
+                       }
+               }
+
+               if (i > 0 && tests & DRM_COLOR_LUT_NON_DECREASING) {
+                       if (entry[i].red < entry[i - 1].red ||
+                           entry[i].green < entry[i - 1].green ||
+                           entry[i].blue < entry[i - 1].blue) {
+                               DRM_DEBUG_KMS("LUT entries must never decrease.\n");
+                               return -EINVAL;
+                       }
+               }
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_color_lut_check);
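
Illustrative usage only (the foo_* names are hypothetical): hardware that
applies a single gamma curve to all three channels could reject invalid LUTs
from its atomic_check hook like so:

static int foo_crtc_atomic_check(struct drm_crtc *crtc,
                                 struct drm_crtc_state *state)
{
        /* A NULL blob or an empty test mask is treated as "nothing to do". */
        return drm_color_lut_check(state->gamma_lut,
                                   DRM_COLOR_LUT_EQUAL_CHANNELS |
                                   DRM_COLOR_LUT_NON_DECREASING);
}
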
index 847539645558c7a9fc752298ae38bda1cda43282..e3ff73695c32fd5a95ffbffbeaa92edb6317fd6b 100644 (file)
@@ -1367,7 +1367,7 @@ EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
  *
  *     Absence of the property should indicate absence of support.
  *
- * "vrr_enabled":
+ * "VRR_ENABLED":
  *     Default &drm_crtc boolean property that notifies the driver that the
  *     content on the CRTC is suitable for variable refresh rate presentation.
  *     The driver will take this property as a hint to enable variable
index 1593dd6cdfb729c3bdfe19215947314df1f0a74d..7dabbaf033a16055671ccfdb9d164936a8b9a4da 100644 (file)
@@ -93,15 +93,6 @@ struct drm_crtc *drm_crtc_from_index(struct drm_device *dev, int idx)
 }
 EXPORT_SYMBOL(drm_crtc_from_index);
 
-/**
- * drm_crtc_force_disable - Forcibly turn off a CRTC
- * @crtc: CRTC to turn off
- *
- * Note: This should only be used by non-atomic legacy drivers.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
 int drm_crtc_force_disable(struct drm_crtc *crtc)
 {
        struct drm_mode_set set = {
@@ -112,38 +103,6 @@ int drm_crtc_force_disable(struct drm_crtc *crtc)
 
        return drm_mode_set_config_internal(&set);
 }
-EXPORT_SYMBOL(drm_crtc_force_disable);
-
-/**
- * drm_crtc_force_disable_all - Forcibly turn off all enabled CRTCs
- * @dev: DRM device whose CRTCs to turn off
- *
- * Drivers may want to call this on unload to ensure that all displays are
- * unlit and the GPU is in a consistent, low power state. Takes modeset locks.
- *
- * Note: This should only be used by non-atomic legacy drivers. For an atomic
- * version look at drm_atomic_helper_shutdown().
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_crtc_force_disable_all(struct drm_device *dev)
-{
-       struct drm_crtc *crtc;
-       int ret = 0;
-
-       drm_modeset_lock_all(dev);
-       drm_for_each_crtc(crtc, dev)
-               if (crtc->enabled) {
-                       ret = drm_crtc_force_disable(crtc);
-                       if (ret)
-                               goto out;
-               }
-out:
-       drm_modeset_unlock_all(dev);
-       return ret;
-}
-EXPORT_SYMBOL(drm_crtc_force_disable_all);
 
 static unsigned int drm_num_crtcs(struct drm_device *dev)
 {
index a3c81850e755f6b8904b662744002d810f04c289..747661f63fbb4f65d35a5f9d1521ff01c5e56ed9 100644 (file)
@@ -93,6 +93,8 @@ bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
        struct drm_connector_list_iter conn_iter;
        struct drm_device *dev = encoder->dev;
 
+       WARN_ON(drm_drv_uses_atomic_modeset(dev));
+
        /*
         * We can expect this mutex to be locked if we are not panicking.
         * Locking is currently fubar in the panic handler.
@@ -131,6 +133,8 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
        struct drm_encoder *encoder;
        struct drm_device *dev = crtc->dev;
 
+       WARN_ON(drm_drv_uses_atomic_modeset(dev));
+
        /*
         * We can expect this mutex to be locked if we are not panicking.
         * Locking is currently fubar in the panic handler.
@@ -212,8 +216,7 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev)
  */
 void drm_helper_disable_unused_functions(struct drm_device *dev)
 {
-       if (drm_core_check_feature(dev, DRIVER_ATOMIC))
-               DRM_ERROR("Called for atomic driver, this is not what you want.\n");
+       WARN_ON(drm_drv_uses_atomic_modeset(dev));
 
        drm_modeset_lock_all(dev);
        __drm_helper_disable_unused_functions(dev);
@@ -281,6 +284,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
        struct drm_encoder *encoder;
        bool ret = true;
 
+       WARN_ON(drm_drv_uses_atomic_modeset(dev));
+
        drm_warn_on_modeset_not_all_locked(dev);
 
        saved_enabled = crtc->enabled;
@@ -386,9 +391,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
                if (!encoder_funcs)
                        continue;
 
-               DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
-                       encoder->base.id, encoder->name,
-                       mode->base.id, mode->name);
+               DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%s]\n",
+                       encoder->base.id, encoder->name, mode->name);
                if (encoder_funcs->mode_set)
                        encoder_funcs->mode_set(encoder, mode, adjusted_mode);
 
@@ -540,6 +544,9 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set,
 
        crtc_funcs = set->crtc->helper_private;
 
+       dev = set->crtc->dev;
+       WARN_ON(drm_drv_uses_atomic_modeset(dev));
+
        if (!set->mode)
                set->fb = NULL;
 
@@ -555,8 +562,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set,
                return 0;
        }
 
-       dev = set->crtc->dev;
-
        drm_warn_on_modeset_not_all_locked(dev);
 
        /*
@@ -875,6 +880,8 @@ int drm_helper_connector_dpms(struct drm_connector *connector, int mode)
        struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
        int old_dpms, encoder_dpms = DRM_MODE_DPMS_OFF;
 
+       WARN_ON(drm_drv_uses_atomic_modeset(connector->dev));
+
        if (mode == connector->dpms)
                return 0;
 
@@ -946,6 +953,8 @@ void drm_helper_resume_force_mode(struct drm_device *dev)
        int encoder_dpms;
        bool ret;
 
+       WARN_ON(drm_drv_uses_atomic_modeset(dev));
+
        drm_modeset_lock_all(dev);
        drm_for_each_crtc(crtc, dev) {
 
@@ -984,3 +993,38 @@ void drm_helper_resume_force_mode(struct drm_device *dev)
        drm_modeset_unlock_all(dev);
 }
 EXPORT_SYMBOL(drm_helper_resume_force_mode);
+
+/**
+ * drm_helper_force_disable_all - Forcibly turn off all enabled CRTCs
+ * @dev: DRM device whose CRTCs to turn off
+ *
+ * Drivers may want to call this on unload to ensure that all displays are
+ * unlit and the GPU is in a consistent, low power state. Takes modeset locks.
+ *
+ * Note: This should only be used by non-atomic legacy drivers. For an atomic
+ * version look at drm_atomic_helper_shutdown().
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_helper_force_disable_all(struct drm_device *dev)
+{
+       struct drm_crtc *crtc;
+       int ret = 0;
+
+       drm_modeset_lock_all(dev);
+       drm_for_each_crtc(crtc, dev)
+               if (crtc->enabled) {
+                       struct drm_mode_set set = {
+                               .crtc = crtc,
+                       };
+
+                       ret = drm_mode_set_config_internal(&set);
+                       if (ret)
+                               goto out;
+               }
+out:
+       drm_modeset_unlock_all(dev);
+       return ret;
+}
+EXPORT_SYMBOL(drm_helper_force_disable_all);
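
The cirrus_fbdev_destroy() hunk earlier in this diff shows the intended call
site; in general a legacy, non-atomic driver's unload path would look roughly
like this sketch (foo_unload is made up):

static void foo_unload(struct drm_device *dev)
{
        /* Unlight every CRTC before the framebuffers go away. */
        drm_helper_force_disable_all(dev);
        drm_mode_config_cleanup(dev);
}
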
index 86893448f4868a876a49e174b64a29d7514a4108..216f2a9ee3d406e2fed7f21492d1590eef4025f9 100644 (file)
@@ -50,6 +50,7 @@ int drm_crtc_check_viewport(const struct drm_crtc *crtc,
                            const struct drm_framebuffer *fb);
 int drm_crtc_register_all(struct drm_device *dev);
 void drm_crtc_unregister_all(struct drm_device *dev);
+int drm_crtc_force_disable(struct drm_crtc *crtc);
 
 struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc);
 
index 31032407254d4dab631fb863f58ee5353dbfeb08..e16aa5ae00b48e0c490b7843871de6b0ee87651b 100644 (file)
@@ -333,3 +333,44 @@ drm_atomic_helper_damage_iter_next(struct drm_atomic_helper_damage_iter *iter,
        return ret;
 }
 EXPORT_SYMBOL(drm_atomic_helper_damage_iter_next);
+
+/**
+ * drm_atomic_helper_damage_merged - Merged plane damage
+ * @old_state: Old plane state for validation.
+ * @state: Plane state from which to iterate the damage clips.
+ * @rect: Returns the merged damage rectangle
+ *
+ * This function merges any valid plane damage clips into one rectangle and
+ * returns it in @rect.
+ *
+ * For details see: drm_atomic_helper_damage_iter_init() and
+ * drm_atomic_helper_damage_iter_next().
+ *
+ * Returns:
+ * True if there is valid plane damage otherwise false.
+ */
+bool drm_atomic_helper_damage_merged(const struct drm_plane_state *old_state,
+                                    struct drm_plane_state *state,
+                                    struct drm_rect *rect)
+{
+       struct drm_atomic_helper_damage_iter iter;
+       struct drm_rect clip;
+       bool valid = false;
+
+       rect->x1 = INT_MAX;
+       rect->y1 = INT_MAX;
+       rect->x2 = 0;
+       rect->y2 = 0;
+
+       drm_atomic_helper_damage_iter_init(&iter, old_state, state);
+       drm_atomic_for_each_plane_damage(&iter, &clip) {
+               rect->x1 = min(rect->x1, clip.x1);
+               rect->y1 = min(rect->y1, clip.y1);
+               rect->x2 = max(rect->x2, clip.x2);
+               rect->y2 = max(rect->y2, clip.y2);
+               valid = true;
+       }
+
+       return valid;
+}
+EXPORT_SYMBOL(drm_atomic_helper_damage_merged);
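
A minimal usage sketch, assuming hardware with a single-rectangle flush
primitive (foo_flush() is made up):

static void foo_plane_atomic_update(struct drm_plane *plane,
                                    struct drm_plane_state *old_state)
{
        struct drm_rect rect;

        /* Collapse all damage clips into one bounding rectangle. */
        if (drm_atomic_helper_damage_merged(old_state, plane->state, &rect))
                foo_flush(plane, rect.x1, rect.y1,
                          drm_rect_width(&rect), drm_rect_height(&rect));
}
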
index 2d6c491a0542ef02655b8bee98a52644000703b3..54120b6319e7489e91c6c82565dd4946d7a64acd 100644 (file)
@@ -154,6 +154,7 @@ u8 drm_dp_link_rate_to_bw_code(int link_rate)
        default:
                WARN(1, "unknown DP link rate %d, using %x\n", link_rate,
                     DP_LINK_BW_1_62);
+               /* fall through */
        case 162000:
                return DP_LINK_BW_1_62;
        case 270000:
@@ -171,6 +172,7 @@ int drm_dp_bw_code_to_link_rate(u8 link_bw)
        switch (link_bw) {
        default:
                WARN(1, "unknown DP link BW code %x, using 162000\n", link_bw);
+               /* fall through */
        case DP_LINK_BW_1_62:
                return 162000;
        case DP_LINK_BW_2_7:
@@ -192,11 +194,11 @@ drm_dp_dump_access(const struct drm_dp_aux *aux,
        const char *arrow = request == DP_AUX_NATIVE_READ ? "->" : "<-";
 
        if (ret > 0)
-               drm_dbg(DRM_UT_DP, "%s: 0x%05x AUX %s (ret=%3d) %*ph\n",
-                       aux->name, offset, arrow, ret, min(ret, 20), buffer);
+               DRM_DEBUG_DP("%s: 0x%05x AUX %s (ret=%3d) %*ph\n",
+                            aux->name, offset, arrow, ret, min(ret, 20), buffer);
        else
-               drm_dbg(DRM_UT_DP, "%s: 0x%05x AUX %s (ret=%3d)\n",
-                       aux->name, offset, arrow, ret);
+               DRM_DEBUG_DP("%s: 0x%05x AUX %s (ret=%3d)\n",
+                            aux->name, offset, arrow, ret);
 }
 
 /**
@@ -552,6 +554,7 @@ int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
                case DP_DS_16BPC:
                        return 16;
                }
+               /* fall through */
        default:
                return 0;
        }
@@ -884,7 +887,8 @@ static void drm_dp_i2c_msg_set_request(struct drm_dp_aux_msg *msg,
 {
        msg->request = (i2c_msg->flags & I2C_M_RD) ?
                DP_AUX_I2C_READ : DP_AUX_I2C_WRITE;
-       msg->request |= DP_AUX_I2C_MOT;
+       if (!(i2c_msg->flags & I2C_M_STOP))
+               msg->request |= DP_AUX_I2C_MOT;
 }
 
 /*
@@ -1273,6 +1277,8 @@ static const struct dpcd_quirk dpcd_quirk_list[] = {
        { OUI(0x00, 0x22, 0xb9), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_CONSTANT_N) },
        /* LG LP140WF6-SPM1 eDP panel */
        { OUI(0x00, 0x22, 0xb9), DEVICE_ID('s', 'i', 'v', 'a', 'r', 'T'), false, BIT(DP_DPCD_QUIRK_CONSTANT_N) },
+       /* Apple panels need some additional handling to support PSR */
+       { OUI(0x00, 0x10, 0xfa), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_NO_PSR) }
 };
 
 #undef OUI
index 2ab16c9e6243b811903f0e9dab12a45a1a5e2b86..b1c63e9cdf8afbd398bcbebe36bc4fe460377e47 100644 (file)
@@ -33,7 +33,7 @@
 #include <drm/drm_fixed.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 
 /**
  * DOC: dp mst helper
@@ -46,7 +46,7 @@ static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
                                  char *buf);
 static int test_calc_pbn_mode(void);
 
-static void drm_dp_put_port(struct drm_dp_mst_port *port);
+static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);
 
 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
                                     int id,
@@ -67,6 +67,64 @@ static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
+
+#define DP_STR(x) [DP_ ## x] = #x
+
+static const char *drm_dp_mst_req_type_str(u8 req_type)
+{
+       static const char * const req_type_str[] = {
+               DP_STR(GET_MSG_TRANSACTION_VERSION),
+               DP_STR(LINK_ADDRESS),
+               DP_STR(CONNECTION_STATUS_NOTIFY),
+               DP_STR(ENUM_PATH_RESOURCES),
+               DP_STR(ALLOCATE_PAYLOAD),
+               DP_STR(QUERY_PAYLOAD),
+               DP_STR(RESOURCE_STATUS_NOTIFY),
+               DP_STR(CLEAR_PAYLOAD_ID_TABLE),
+               DP_STR(REMOTE_DPCD_READ),
+               DP_STR(REMOTE_DPCD_WRITE),
+               DP_STR(REMOTE_I2C_READ),
+               DP_STR(REMOTE_I2C_WRITE),
+               DP_STR(POWER_UP_PHY),
+               DP_STR(POWER_DOWN_PHY),
+               DP_STR(SINK_EVENT_NOTIFY),
+               DP_STR(QUERY_STREAM_ENC_STATUS),
+       };
+
+       if (req_type >= ARRAY_SIZE(req_type_str) ||
+           !req_type_str[req_type])
+               return "unknown";
+
+       return req_type_str[req_type];
+}
+
+#undef DP_STR
+#define DP_STR(x) [DP_NAK_ ## x] = #x
+
+static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
+{
+       static const char * const nak_reason_str[] = {
+               DP_STR(WRITE_FAILURE),
+               DP_STR(INVALID_READ),
+               DP_STR(CRC_FAILURE),
+               DP_STR(BAD_PARAM),
+               DP_STR(DEFER),
+               DP_STR(LINK_FAILURE),
+               DP_STR(NO_RESOURCES),
+               DP_STR(DPCD_FAIL),
+               DP_STR(I2C_NAK),
+               DP_STR(ALLOCATE_FAIL),
+       };
+
+       if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
+           !nak_reason_str[nak_reason])
+               return "unknown";
+
+       return nak_reason_str[nak_reason];
+}
+
+#undef DP_STR
+
 /* sideband msg handling */
 static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
 {
@@ -568,7 +626,7 @@ static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
        msg->reply_type = (raw->msg[0] & 0x80) >> 7;
        msg->req_type = (raw->msg[0] & 0x7f);
 
-       if (msg->reply_type) {
+       if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
                memcpy(msg->u.nak.guid, &raw->msg[1], 16);
                msg->u.nak.reason = raw->msg[17];
                msg->u.nak.nak_data = raw->msg[18];
@@ -594,7 +652,8 @@ static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
        case DP_POWER_UP_PHY:
                return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
        default:
-               DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);
+               DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
+                         drm_dp_mst_req_type_str(msg->req_type));
                return false;
        }
 }
@@ -661,7 +720,8 @@ static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
        case DP_RESOURCE_STATUS_NOTIFY:
                return drm_dp_sideband_parse_resource_status_notify(raw, msg);
        default:
-               DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type);
+               DRM_ERROR("Got unknown request 0x%02x (%s)\n", msg->req_type,
+                         drm_dp_mst_req_type_str(msg->req_type));
                return false;
        }
 }
@@ -850,46 +910,212 @@ static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
        if (lct > 1)
                memcpy(mstb->rad, rad, lct / 2);
        INIT_LIST_HEAD(&mstb->ports);
-       kref_init(&mstb->kref);
+       kref_init(&mstb->topology_kref);
+       kref_init(&mstb->malloc_kref);
        return mstb;
 }
 
-static void drm_dp_free_mst_port(struct kref *kref);
-
 static void drm_dp_free_mst_branch_device(struct kref *kref)
 {
-       struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
-       if (mstb->port_parent) {
-               if (list_empty(&mstb->port_parent->next))
-                       kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
-       }
+       struct drm_dp_mst_branch *mstb =
+               container_of(kref, struct drm_dp_mst_branch, malloc_kref);
+
+       if (mstb->port_parent)
+               drm_dp_mst_put_port_malloc(mstb->port_parent);
+
        kfree(mstb);
 }
 
+/**
+ * DOC: Branch device and port refcounting
+ *
+ * Topology refcount overview
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * The refcounting schemes for &struct drm_dp_mst_branch and &struct
+ * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
+ * two different kinds of refcounts: topology refcounts, and malloc refcounts.
+ *
+ * Topology refcounts are not exposed to drivers, and are handled internally
+ * by the DP MST helpers. The helpers use them in order to prevent the
+ * in-memory topology state from being changed in the middle of critical
+ * operations like changing the internal state of payload allocations. This
+ * means each branch and port will be considered to be connected to the rest
+ * of the topology until its topology refcount reaches zero. Additionally,
+ * for ports this means that their associated &struct drm_connector will stay
+ * registered with userspace until the port's refcount reaches 0.
+ *
+ * Malloc refcount overview
+ * ~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
+ * drm_dp_mst_branch allocated even after all of its topology references have
+ * been dropped, so that the driver or MST helpers can safely access each
+ * branch's last known state before it was disconnected from the topology.
+ * When the malloc refcount of a port or branch reaches 0, the memory
+ * allocation containing the &struct drm_dp_mst_branch or &struct
+ * drm_dp_mst_port respectively will be freed.
+ *
+ * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
+ * to drivers. As of this writing, there are no drivers that have a use case
+ * for accessing &struct drm_dp_mst_branch outside of the MST helpers.
+ * Exposing this API to drivers in a race-free manner would take more
+ * tweaking of the refcounting scheme; however, patches are welcome provided
+ * there is a legitimate driver use case for this.
+ *
+ * Refcount relationships in a topology
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Let's take a look at why the relationship between topology and malloc
+ * refcounts is designed the way it is.
+ *
+ * .. kernel-figure:: dp-mst/topology-figure-1.dot
+ *
+ *    An example of topology and malloc refs in a DP MST topology with two
+ *    active payloads. Topology refcount increments are indicated by solid
+ *    lines, and malloc refcount increments are indicated by dashed lines.
+ *    Each starts from the branch which incremented the refcount, and ends at
+ *    the branch to which the refcount belongs, i.e. the arrow points the
+ *    same way as the C pointers used to reference a structure.
+ *
+ * As you can see in the above figure, every branch increments the topology
+ * refcount of its children, and increments the malloc refcount of its
+ * parent. Additionally, every payload increments the malloc refcount of its
+ * assigned port by 1.
+ *
+ * So, what would happen if MSTB #3 from the above figure was unplugged from
+ * the system, but the driver hadn't yet removed payload #2 from port #3? The
+ * topology would start to look like the figure below.
+ *
+ * .. kernel-figure:: dp-mst/topology-figure-2.dot
+ *
+ *    Ports and branch devices which have been released from memory are
+ *    colored grey, and references which have been removed are colored red.
+ *
+ * Whenever a port or branch device's topology refcount reaches zero, it will
+ * decrement the topology refcounts of all its children, the malloc refcount
+ * of its parent, and finally its own malloc refcount. For MSTB #4 and port
+ * #4, this means they both have been disconnected from the topology and freed
+ * from memory. But, because payload #2 is still holding a reference to port
+ * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
+ * is still accessible from memory. This also means port #3 has not yet
+ * decremented the malloc refcount of MSTB #3, so its &struct
+ * drm_dp_mst_branch will also stay allocated in memory until port #3's
+ * malloc refcount reaches 0.
+ *
+ * This relationship is necessary because in order to release payload #2, we
+ * need to be able to figure out the last relative of port #3 that's still
+ * connected to the topology. In this case, we would travel up the topology as
+ * shown below.
+ *
+ * .. kernel-figure:: dp-mst/topology-figure-3.dot
+ *
+ * And finally, remove payload #2 by communicating with port #2 through
+ * sideband transactions.
+ */
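/*
 * A minimal driver-side sketch of the scheme described above (hypothetical
 * code, not part of this patch; struct example_mst_state and its field are
 * assumptions). Topology references stay internal to the helpers, so a
 * driver only deals in malloc references: take one while stashing a port
 * pointer and drop it when done, and the port's memory remains valid even
 * after the port has left the topology.
 */
struct example_mst_state {
	struct drm_dp_mst_port *port;
};

static void example_stash_port(struct example_mst_state *state,
			       struct drm_dp_mst_port *port)
{
	/* Keep the allocation alive even if the port is later unplugged */
	drm_dp_mst_get_port_malloc(port);
	state->port = port;
}

static void example_unstash_port(struct example_mst_state *state)
{
	/* May free the port if this was the last malloc reference */
	drm_dp_mst_put_port_malloc(state->port);
	state->port = NULL;
}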
+
+/**
+ * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
+ * device
+ * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
+ *
+ * Increments &drm_dp_mst_branch.malloc_kref. When
+ * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
+ * will be released and @mstb may no longer be used.
+ *
+ * See also: drm_dp_mst_put_mstb_malloc()
+ */
+static void
+drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
+{
+       kref_get(&mstb->malloc_kref);
+       DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
+}
+
+/**
+ * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
+ * device
+ * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
+ *
+ * Decrements &drm_dp_mst_branch.malloc_kref. When
+ * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
+ * will be released and @mstb may no longer be used.
+ *
+ * See also: drm_dp_mst_get_mstb_malloc()
+ */
+static void
+drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
+{
+       DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
+       kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
+}
+
+static void drm_dp_free_mst_port(struct kref *kref)
+{
+       struct drm_dp_mst_port *port =
+               container_of(kref, struct drm_dp_mst_port, malloc_kref);
+
+       drm_dp_mst_put_mstb_malloc(port->parent);
+       kfree(port);
+}
+
+/**
+ * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
+ * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
+ *
+ * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
+ * reaches 0, the memory allocation for @port will be released and @port may
+ * no longer be used.
+ *
+ * Because @port could potentially be freed at any time by the DP MST helpers
+ * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
+ * function, drivers that wish to make use of &struct drm_dp_mst_port should
+ * ensure that they grab at least one main malloc reference to their MST ports
+ * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
+ * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
+ *
+ * See also: drm_dp_mst_put_port_malloc()
+ */
+void
+drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
+{
+       kref_get(&port->malloc_kref);
+       DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref));
+}
+EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);
+
+/**
+ * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
+ * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
+ *
+ * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
+ * reaches 0, the memory allocation for @port will be released and @port may
+ * no longer be used.
+ *
+ * See also: drm_dp_mst_get_port_malloc()
+ */
+void
+drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
+{
+       DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
+       kref_put(&port->malloc_kref, drm_dp_free_mst_port);
+}
+EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
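/*
 * Hypothetical &drm_dp_mst_topology_cbs.add_connector implementation
 * illustrating the rule documented above (all example_* names are
 * assumptions, not part of this patch): take the driver's first malloc
 * reference in the callback, before there is any chance of
 * &drm_dp_mst_port.malloc_kref dropping to 0.
 */
static struct drm_connector *
example_add_connector(struct drm_dp_mst_topology_mgr *mgr,
		      struct drm_dp_mst_port *port, const char *path)
{
	struct drm_connector *connector;

	connector = example_create_mst_connector(mgr, port, path);
	if (!connector)
		return NULL;

	/* Hold @port for as long as the connector references it */
	drm_dp_mst_get_port_malloc(port);
	return connector;
}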
+
 static void drm_dp_destroy_mst_branch_device(struct kref *kref)
 {
-       struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
+       struct drm_dp_mst_branch *mstb =
+               container_of(kref, struct drm_dp_mst_branch, topology_kref);
+       struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
        struct drm_dp_mst_port *port, *tmp;
        bool wake_tx = false;
 
-       /*
-        * init kref again to be used by ports to remove mst branch when it is
-        * not needed anymore
-        */
-       kref_init(kref);
-
-       if (mstb->port_parent && list_empty(&mstb->port_parent->next))
-               kref_get(&mstb->port_parent->kref);
-
-       /*
-        * destroy all ports - don't need lock
-        * as there are no more references to the mst branch
-        * device at this point.
-        */
+       mutex_lock(&mgr->lock);
        list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
                list_del(&port->next);
-               drm_dp_put_port(port);
+               drm_dp_mst_topology_put_port(port);
        }
+       mutex_unlock(&mgr->lock);
 
        /* drop any tx slots msg */
        mutex_lock(&mstb->mgr->qlock);
@@ -908,14 +1134,83 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
        if (wake_tx)
                wake_up_all(&mstb->mgr->tx_waitq);
 
-       kref_put(kref, drm_dp_free_mst_branch_device);
+       drm_dp_mst_put_mstb_malloc(mstb);
 }
 
-static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
+/**
+ * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
+ * branch device unless it's zero
+ * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
+ *
+ * Attempts to grab a topology reference to @mstb, if it hasn't yet been
+ * removed from the topology (i.e. &drm_dp_mst_branch.topology_kref has
+ * reached 0). Holding a topology reference implies that a malloc reference
+ * will be held to @mstb as long as the user holds the topology reference.
+ *
+ * Care should be taken to ensure that the user has at least one malloc
+ * reference to @mstb. If you already have a topology reference to @mstb, you
+ * should use drm_dp_mst_topology_get_mstb() instead.
+ *
+ * See also:
+ * drm_dp_mst_topology_get_mstb()
+ * drm_dp_mst_topology_put_mstb()
+ *
+ * Returns:
+ * * 1: A topology reference was grabbed successfully
+ * * 0: @mstb is no longer in the topology, no reference was grabbed
+ */
+static int __must_check
+drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
 {
-       kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);
+       int ret = kref_get_unless_zero(&mstb->topology_kref);
+
+       if (ret)
+               DRM_DEBUG("mstb %p (%d)\n", mstb,
+                         kref_read(&mstb->topology_kref));
+
+       return ret;
 }
 
+/**
+ * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
+ * branch device
+ * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
+ *
+ * Increments &drm_dp_mst_branch.topology_kref without checking whether or
+ * not it's already reached 0. This is only valid to use in scenarios where
+ * you are already guaranteed to have at least one active topology reference
+ * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
+ *
+ * See also:
+ * drm_dp_mst_topology_try_get_mstb()
+ * drm_dp_mst_topology_put_mstb()
+ */
+static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
+{
+       WARN_ON(kref_read(&mstb->topology_kref) == 0);
+       kref_get(&mstb->topology_kref);
+       DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
+}
+
+/**
+ * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
+ * device
+ * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
+ *
+ * Releases a topology reference from @mstb by decrementing
+ * &drm_dp_mst_branch.topology_kref.
+ *
+ * See also:
+ * drm_dp_mst_topology_try_get_mstb()
+ * drm_dp_mst_topology_get_mstb()
+ */
+static void
+drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
+{
+       DRM_DEBUG("mstb %p (%d)\n",
+                 mstb, kref_read(&mstb->topology_kref) - 1);
+       kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
+}
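/*
 * Sketch of the intended usage pattern for the three helpers above (not
 * part of this patch). A plain get is only legal while another topology
 * reference is already held; everywhere else the try variant must be used
 * and its result checked:
 *
 *	if (!drm_dp_mst_topology_try_get_mstb(mstb))
 *		return;		(mstb already left the topology)
 *	...safely inspect or modify topology state...
 *	drm_dp_mst_topology_put_mstb(mstb);
 */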
 
 static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
 {
@@ -930,19 +1225,18 @@ static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
        case DP_PEER_DEVICE_MST_BRANCHING:
                mstb = port->mstb;
                port->mstb = NULL;
-               drm_dp_put_mst_branch_device(mstb);
+               drm_dp_mst_topology_put_mstb(mstb);
                break;
        }
 }
 
 static void drm_dp_destroy_port(struct kref *kref)
 {
-       struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
+       struct drm_dp_mst_port *port =
+               container_of(kref, struct drm_dp_mst_port, topology_kref);
        struct drm_dp_mst_topology_mgr *mgr = port->mgr;
 
        if (!port->input) {
-               port->vcpi.num_slots = 0;
-
                kfree(port->cached_edid);
 
                /*
@@ -956,7 +1250,6 @@ static void drm_dp_destroy_port(struct kref *kref)
                         * from an EDID retrieval */
 
                        mutex_lock(&mgr->destroy_connector_lock);
-                       kref_get(&port->parent->kref);
                        list_add(&port->next, &mgr->destroy_connector_list);
                        mutex_unlock(&mgr->destroy_connector_lock);
                        schedule_work(&mgr->destroy_connector_work);
@@ -967,25 +1260,95 @@ static void drm_dp_destroy_port(struct kref *kref)
                drm_dp_port_teardown_pdt(port, port->pdt);
                port->pdt = DP_PEER_DEVICE_NONE;
        }
-       kfree(port);
+       drm_dp_mst_put_port_malloc(port);
 }
 
-static void drm_dp_put_port(struct drm_dp_mst_port *port)
+/**
+ * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
+ * port unless it's zero
+ * @port: &struct drm_dp_mst_port to increment the topology refcount of
+ *
+ * Attempts to grab a topology reference to @port, if it hasn't yet been
+ * removed from the topology (i.e. &drm_dp_mst_port.topology_kref has reached
+ * 0). Holding a topology reference implies that a malloc reference will be
+ * held to @port as long as the user holds the topology reference.
+ *
+ * Care should be taken to ensure that the user has at least one malloc
+ * reference to @port. If you already have a topology reference to @port, you
+ * should use drm_dp_mst_topology_get_port() instead.
+ *
+ * See also:
+ * drm_dp_mst_topology_get_port()
+ * drm_dp_mst_topology_put_port()
+ *
+ * Returns:
+ * * 1: A topology reference was grabbed successfully
+ * * 0: @port is no longer in the topology, no reference was grabbed
+ */
+static int __must_check
+drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
+{
+       int ret = kref_get_unless_zero(&port->topology_kref);
+
+       if (ret)
+               DRM_DEBUG("port %p (%d)\n", port,
+                         kref_read(&port->topology_kref));
+
+       return ret;
+}
+
+/**
+ * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
+ * @port: The &struct drm_dp_mst_port to increment the topology refcount of
+ *
+ * Increments &drm_dp_mst_port.topology_kref without checking whether or
+ * not it's already reached 0. This is only valid to use in scenarios where
+ * you are already guaranteed to have at least one active topology reference
+ * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
+ *
+ * See also:
+ * drm_dp_mst_topology_try_get_port()
+ * drm_dp_mst_topology_put_port()
+ */
+static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
+{
+       WARN_ON(kref_read(&port->topology_kref) == 0);
+       kref_get(&port->topology_kref);
+       DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
+}
+
+/**
+ * drm_dp_mst_topology_put_port() - release a topology reference to a port
+ * @port: The &struct drm_dp_mst_port to release the topology reference from
+ *
+ * Releases a topology reference from @port by decrementing
+ * &drm_dp_mst_port.topology_kref.
+ *
+ * See also:
+ * drm_dp_mst_topology_try_get_port()
+ * drm_dp_mst_topology_get_port()
+ */
+static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
 {
-       kref_put(&port->kref, drm_dp_destroy_port);
+       DRM_DEBUG("port %p (%d)\n",
+                 port, kref_read(&port->topology_kref) - 1);
+       kref_put(&port->topology_kref, drm_dp_destroy_port);
 }
 
-static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find)
+static struct drm_dp_mst_branch *
+drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
+                                             struct drm_dp_mst_branch *to_find)
 {
        struct drm_dp_mst_port *port;
        struct drm_dp_mst_branch *rmstb;
-       if (to_find == mstb) {
-               kref_get(&mstb->kref);
+
+       if (to_find == mstb)
                return mstb;
-       }
+
        list_for_each_entry(port, &mstb->ports, next) {
                if (port->mstb) {
-                       rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find);
+                       rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
+                           port->mstb, to_find);
                        if (rmstb)
                                return rmstb;
                }
@@ -993,27 +1356,37 @@ static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct
        return NULL;
 }
 
-static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb)
+static struct drm_dp_mst_branch *
+drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
+                                      struct drm_dp_mst_branch *mstb)
 {
        struct drm_dp_mst_branch *rmstb = NULL;
+
        mutex_lock(&mgr->lock);
-       if (mgr->mst_primary)
-               rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb);
+       if (mgr->mst_primary) {
+               rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
+                   mgr->mst_primary, mstb);
+
+               if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
+                       rmstb = NULL;
+       }
        mutex_unlock(&mgr->lock);
        return rmstb;
 }
 
-static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find)
+static struct drm_dp_mst_port *
+drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
+                                             struct drm_dp_mst_port *to_find)
 {
        struct drm_dp_mst_port *port, *mport;
 
        list_for_each_entry(port, &mstb->ports, next) {
-               if (port == to_find) {
-                       kref_get(&port->kref);
+               if (port == to_find)
                        return port;
-               }
+
                if (port->mstb) {
-                       mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find);
+                       mport = drm_dp_mst_topology_get_port_validated_locked(
+                           port->mstb, to_find);
                        if (mport)
                                return mport;
                }
@@ -1021,12 +1394,20 @@ static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_
        return NULL;
 }
 
-static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+static struct drm_dp_mst_port *
+drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
+                                      struct drm_dp_mst_port *port)
 {
        struct drm_dp_mst_port *rport = NULL;
+
        mutex_lock(&mgr->lock);
-       if (mgr->mst_primary)
-               rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port);
+       if (mgr->mst_primary) {
+               rport = drm_dp_mst_topology_get_port_validated_locked(
+                   mgr->mst_primary, port);
+
+               if (rport && !drm_dp_mst_topology_try_get_port(rport))
+                       rport = NULL;
+       }
        mutex_unlock(&mgr->lock);
        return rport;
 }
@@ -1034,11 +1415,12 @@ static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_t
 static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
 {
        struct drm_dp_mst_port *port;
+       int ret;
 
        list_for_each_entry(port, &mstb->ports, next) {
                if (port->port_num == port_num) {
-                       kref_get(&port->kref);
-                       return port;
+                       ret = drm_dp_mst_topology_try_get_port(port);
+                       return ret ? port : NULL;
                }
        }
 
@@ -1087,6 +1469,11 @@ static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
                if (port->mstb) {
                        port->mstb->mgr = port->mgr;
                        port->mstb->port_parent = port;
+                       /*
+                        * Make sure this port's memory allocation stays
+                        * around until its child MSTB releases it
+                        */
+                       drm_dp_mst_get_port_malloc(port);
 
                        send_link = true;
                }
@@ -1147,17 +1534,26 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
        bool created = false;
        int old_pdt = 0;
        int old_ddps = 0;
+
        port = drm_dp_get_port(mstb, port_msg->port_number);
        if (!port) {
                port = kzalloc(sizeof(*port), GFP_KERNEL);
                if (!port)
                        return;
-               kref_init(&port->kref);
+               kref_init(&port->topology_kref);
+               kref_init(&port->malloc_kref);
                port->parent = mstb;
                port->port_num = port_msg->port_number;
                port->mgr = mstb->mgr;
                port->aux.name = "DPMST";
                port->aux.dev = dev->dev;
+
+               /*
+                * Make sure the memory allocation for our parent branch stays
+                * around until our own memory allocation is released
+                */
+               drm_dp_mst_get_mstb_malloc(mstb);
+
                created = true;
        } else {
                old_pdt = port->pdt;
@@ -1177,18 +1573,20 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
           for this list */
        if (created) {
                mutex_lock(&mstb->mgr->lock);
-               kref_get(&port->kref);
+               drm_dp_mst_topology_get_port(port);
                list_add(&port->next, &mstb->ports);
                mutex_unlock(&mstb->mgr->lock);
        }
 
        if (old_ddps != port->ddps) {
                if (port->ddps) {
-                       if (!port->input)
-                               drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
+                       if (!port->input) {
+                               drm_dp_send_enum_path_resources(mstb->mgr,
+                                                               mstb, port);
+                       }
                } else {
                        port->available_pbn = 0;
-                       }
+               }
        }
 
        if (old_pdt != port->pdt && !port->input) {
@@ -1202,21 +1600,25 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
        if (created && !port->input) {
                char proppath[255];
 
-               build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
-               port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
+               build_mst_prop_path(mstb, port->port_num, proppath,
+                                   sizeof(proppath));
+               port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr,
+                                                                  port,
+                                                                  proppath);
                if (!port->connector) {
                        /* remove it from the port list */
                        mutex_lock(&mstb->mgr->lock);
                        list_del(&port->next);
                        mutex_unlock(&mstb->mgr->lock);
                        /* drop port list reference */
-                       drm_dp_put_port(port);
+                       drm_dp_mst_topology_put_port(port);
                        goto out;
                }
                if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
                     port->pdt == DP_PEER_DEVICE_SST_SINK) &&
                    port->port_num >= DP_MST_LOGICAL_PORT_0) {
-                       port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
+                       port->cached_edid = drm_get_edid(port->connector,
+                                                        &port->aux.ddc);
                        drm_connector_set_tile_property(port->connector);
                }
                (*mstb->mgr->cbs->register_connector)(port->connector);
@@ -1224,7 +1626,7 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
 
 out:
        /* put reference to this port */
-       drm_dp_put_port(port);
+       drm_dp_mst_topology_put_port(port);
 }
 
 static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
@@ -1259,7 +1661,7 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
                        dowork = true;
        }
 
-       drm_dp_put_port(port);
+       drm_dp_mst_topology_put_port(port);
        if (dowork)
                queue_work(system_long_wq, &mstb->mgr->work);
 
@@ -1270,7 +1672,7 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
 {
        struct drm_dp_mst_branch *mstb;
        struct drm_dp_mst_port *port;
-       int i;
+       int i, ret;
        /* find the port by iterating down */
 
        mutex_lock(&mgr->lock);
@@ -1295,7 +1697,9 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
                        }
                }
        }
-       kref_get(&mstb->kref);
+       ret = drm_dp_mst_topology_try_get_mstb(mstb);
+       if (!ret)
+               mstb = NULL;
 out:
        mutex_unlock(&mgr->lock);
        return mstb;
@@ -1325,19 +1729,22 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
        return NULL;
 }
 
-static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
-       struct drm_dp_mst_topology_mgr *mgr,
-       uint8_t *guid)
+static struct drm_dp_mst_branch *
+drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
+                                    uint8_t *guid)
 {
        struct drm_dp_mst_branch *mstb;
+       int ret;
 
        /* find the port by iterating down */
        mutex_lock(&mgr->lock);
 
        mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
-
-       if (mstb)
-               kref_get(&mstb->kref);
+       if (mstb) {
+               ret = drm_dp_mst_topology_try_get_mstb(mstb);
+               if (!ret)
+                       mstb = NULL;
+       }
 
        mutex_unlock(&mgr->lock);
        return mstb;
@@ -1362,10 +1769,11 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
                        drm_dp_send_enum_path_resources(mgr, mstb, port);
 
                if (port->mstb) {
-                       mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb);
+                       mstb_child = drm_dp_mst_topology_get_mstb_validated(
+                           mgr, port->mstb);
                        if (mstb_child) {
                                drm_dp_check_and_send_link_address(mgr, mstb_child);
-                               drm_dp_put_mst_branch_device(mstb_child);
+                               drm_dp_mst_topology_put_mstb(mstb_child);
                        }
                }
        }
@@ -1375,16 +1783,19 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
 {
        struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
        struct drm_dp_mst_branch *mstb;
+       int ret;
 
        mutex_lock(&mgr->lock);
        mstb = mgr->mst_primary;
        if (mstb) {
-               kref_get(&mstb->kref);
+               ret = drm_dp_mst_topology_try_get_mstb(mstb);
+               if (!ret)
+                       mstb = NULL;
        }
        mutex_unlock(&mgr->lock);
        if (mstb) {
                drm_dp_check_and_send_link_address(mgr, mstb);
-               drm_dp_put_mst_branch_device(mstb);
+               drm_dp_mst_topology_put_mstb(mstb);
        }
 }
 
@@ -1618,9 +2029,9 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
        if (ret > 0) {
                int i;
 
-               if (txmsg->reply.reply_type == 1)
+               if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
                        DRM_DEBUG_KMS("link address nak received\n");
                else {
                        DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
                        for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
                                DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
@@ -1669,9 +2080,9 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
 
        ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
        if (ret > 0) {
-               if (txmsg->reply.reply_type == 1)
+               if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
                        DRM_DEBUG_KMS("enum path resources nak received\n");
                else {
                        if (port->port_num != txmsg->reply.u.path_resources.port_number)
                                DRM_ERROR("got incorrect port in response\n");
                        DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
@@ -1695,22 +2106,40 @@ static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm
        return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
 }
 
-static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
-                                                                        struct drm_dp_mst_branch *mstb,
-                                                                        int *port_num)
+/*
+ * Searches upwards in the topology starting from mstb to try to find the
+ * closest available parent of mstb that's still connected to the rest of the
+ * topology. This can be used in order to perform operations like releasing
+ * payloads, where the branch device which owned the payload may no longer be
+ * around and thus would require that the payload on the last living relative
+ * be freed instead.
+ */
+static struct drm_dp_mst_branch *
+drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
+                                       struct drm_dp_mst_branch *mstb,
+                                       int *port_num)
 {
        struct drm_dp_mst_branch *rmstb = NULL;
        struct drm_dp_mst_port *found_port;
+
        mutex_lock(&mgr->lock);
-       if (mgr->mst_primary) {
+       if (!mgr->mst_primary)
+               goto out;
+
+       do {
                found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
+               if (!found_port)
+                       break;
 
-               if (found_port) {
+               if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
                        rmstb = found_port->parent;
-                       kref_get(&rmstb->kref);
                        *port_num = found_port->port_num;
+               } else {
+                       /* Search again, starting from this parent */
+                       mstb = found_port->parent;
                }
-       }
+       } while (!rmstb);
+out:
        mutex_unlock(&mgr->lock);
        return rmstb;
 }
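/*
 * Note on the retry loop above (sketch commentary, not part of this patch):
 * if the nearest still-listed parent loses its last topology reference
 * between being found and the try_get, the search restarts upward from that
 * parent instead of failing outright, so a stale payload can still be
 * released through the closest surviving relative.
 */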
@@ -1726,19 +2155,15 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
        u8 sinks[DRM_DP_MAX_SDP_STREAMS];
        int i;
 
-       port = drm_dp_get_validated_port_ref(mgr, port);
-       if (!port)
-               return -EINVAL;
-
        port_num = port->port_num;
-       mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+       mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
        if (!mstb) {
-               mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
+               mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
+                                                              port->parent,
+                                                              &port_num);
 
-               if (!mstb) {
-                       drm_dp_put_port(port);
+               if (!mstb)
                        return -EINVAL;
-               }
        }
 
        txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -1757,17 +2182,24 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
 
        drm_dp_queue_down_tx(mgr, txmsg);
 
+       /*
+        * FIXME: there is a small chance that between getting the last
+        * connected mstb and sending the payload message, the last connected
+        * mstb could also be removed from the topology. In the future, this
+        * needs to be fixed by restarting the
+        * drm_dp_get_last_connected_port_and_mstb() search in the event of a
+        * timeout if the topology is still connected to the system.
+        */
        ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
        if (ret > 0) {
-               if (txmsg->reply.reply_type == 1) {
+               if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
                        ret = -EINVAL;
-               } else
+               else
                        ret = 0;
        }
        kfree(txmsg);
 fail_put:
-       drm_dp_put_mst_branch_device(mstb);
-       drm_dp_put_port(port);
+       drm_dp_mst_topology_put_mstb(mstb);
        return ret;
 }
 
@@ -1777,13 +2209,13 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
        struct drm_dp_sideband_msg_tx *txmsg;
        int len, ret;
 
-       port = drm_dp_get_validated_port_ref(mgr, port);
+       port = drm_dp_mst_topology_get_port_validated(mgr, port);
        if (!port)
                return -EINVAL;
 
        txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
        if (!txmsg) {
-               drm_dp_put_port(port);
+               drm_dp_mst_topology_put_port(port);
                return -ENOMEM;
        }
 
@@ -1793,13 +2225,13 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
 
        ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
        if (ret > 0) {
-               if (txmsg->reply.reply_type == 1)
+               if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
                        ret = -EINVAL;
                else
                        ret = 0;
        }
        kfree(txmsg);
-       drm_dp_put_port(port);
+       drm_dp_mst_topology_put_port(port);
 
        return ret;
 }
@@ -1872,15 +2304,16 @@ static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
  */
 int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
 {
-       int i, j;
-       int cur_slots = 1;
        struct drm_dp_payload req_payload;
        struct drm_dp_mst_port *port;
+       int i, j;
+       int cur_slots = 1;
 
        mutex_lock(&mgr->payload_lock);
        for (i = 0; i < mgr->max_payloads; i++) {
                struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
                struct drm_dp_payload *payload = &mgr->payloads[i];
+               bool put_port = false;
 
                /* solve the current payloads - compare to the hw ones
                   - update the hw view */
@@ -1888,11 +2321,20 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
                if (vcpi) {
                        port = container_of(vcpi, struct drm_dp_mst_port,
                                            vcpi);
-                       port = drm_dp_get_validated_port_ref(mgr, port);
-                       if (!port) {
-                               mutex_unlock(&mgr->payload_lock);
-                               return -EINVAL;
+
+                       /* Validated ports don't matter if we're releasing
+                        * VCPI
+                        */
+                       if (vcpi->num_slots) {
+                               port = drm_dp_mst_topology_get_port_validated(
+                                   mgr, port);
+                               if (!port) {
+                                       mutex_unlock(&mgr->payload_lock);
+                                       return -EINVAL;
+                               }
+                               put_port = true;
                        }
+
                        req_payload.num_slots = vcpi->num_slots;
                        req_payload.vcpi = vcpi->vcpi;
                } else {
@@ -1924,8 +2366,8 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
                }
                cur_slots += req_payload.num_slots;
 
-               if (port)
-                       drm_dp_put_port(port);
+               if (put_port)
+                       drm_dp_mst_topology_put_port(port);
        }
 
        for (i = 0; i < mgr->max_payloads; i++) {
@@ -2024,7 +2466,7 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
        struct drm_dp_sideband_msg_tx *txmsg;
        struct drm_dp_mst_branch *mstb;
 
-       mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+       mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
        if (!mstb)
                return -EINVAL;
 
@@ -2041,14 +2483,14 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
 
        ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
        if (ret > 0) {
-               if (txmsg->reply.reply_type == 1) {
+               if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
                        ret = -EINVAL;
-               } else
+               else
                        ret = 0;
        }
        kfree(txmsg);
 fail_put:
-       drm_dp_put_mst_branch_device(mstb);
+       drm_dp_mst_topology_put_mstb(mstb);
        return ret;
 }
 
@@ -2056,7 +2498,7 @@ static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req
 {
        struct drm_dp_sideband_msg_reply_body reply;
 
-       reply.reply_type = 0;
+       reply.reply_type = DP_SIDEBAND_REPLY_ACK;
        reply.req_type = req_type;
        drm_dp_encode_sideband_reply(&reply, msg);
        return 0;
@@ -2158,7 +2600,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
 
                /* give this the main reference */
                mgr->mst_primary = mstb;
-               kref_get(&mgr->mst_primary->kref);
+               drm_dp_mst_topology_get_mstb(mgr->mst_primary);
 
                ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
                                                         DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
@@ -2192,7 +2634,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
 out_unlock:
        mutex_unlock(&mgr->lock);
        if (mstb)
-               drm_dp_put_mst_branch_device(mstb);
+               drm_dp_mst_topology_put_mstb(mstb);
        return ret;
 
 }
@@ -2357,18 +2799,23 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
                               mgr->down_rep_recv.initial_hdr.lct,
                                      mgr->down_rep_recv.initial_hdr.rad[0],
                                      mgr->down_rep_recv.msg[0]);
-                       drm_dp_put_mst_branch_device(mstb);
+                       drm_dp_mst_topology_put_mstb(mstb);
                        memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
                        return 0;
                }
 
                drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
-               if (txmsg->reply.reply_type == 1) {
-                       DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data);
-               }
+
+               if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
+                       DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
+                                     txmsg->reply.req_type,
+                                     drm_dp_mst_req_type_str(txmsg->reply.req_type),
+                                     txmsg->reply.u.nak.reason,
+                                     drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
+                                     txmsg->reply.u.nak.nak_data);
 
                memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
-               drm_dp_put_mst_branch_device(mstb);
+               drm_dp_mst_topology_put_mstb(mstb);
 
                mutex_lock(&mgr->qlock);
                txmsg->state = DRM_DP_SIDEBAND_TX_RX;
@@ -2441,7 +2888,7 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
                }
 
                if (mstb)
-                       drm_dp_put_mst_branch_device(mstb);
+                       drm_dp_mst_topology_put_mstb(mstb);
 
                memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
        }
@@ -2501,7 +2948,7 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector
        enum drm_connector_status status = connector_status_disconnected;
 
        /* we need to search for the port in the mgr in case its gone */
-       port = drm_dp_get_validated_port_ref(mgr, port);
+       port = drm_dp_mst_topology_get_port_validated(mgr, port);
        if (!port)
                return connector_status_disconnected;
 
@@ -2526,7 +2973,7 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector
                break;
        }
 out:
-       drm_dp_put_port(port);
+       drm_dp_mst_topology_put_port(port);
        return status;
 }
 EXPORT_SYMBOL(drm_dp_mst_detect_port);
@@ -2543,11 +2990,11 @@ bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
 {
        bool ret = false;
 
-       port = drm_dp_get_validated_port_ref(mgr, port);
+       port = drm_dp_mst_topology_get_port_validated(mgr, port);
        if (!port)
                return ret;
        ret = port->has_audio;
-       drm_dp_put_port(port);
+       drm_dp_mst_topology_put_port(port);
        return ret;
 }
 EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
@@ -2567,7 +3014,7 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
        struct edid *edid = NULL;
 
        /* we need to search for the port in the mgr in case its gone */
-       port = drm_dp_get_validated_port_ref(mgr, port);
+       port = drm_dp_mst_topology_get_port_validated(mgr, port);
        if (!port)
                return NULL;
 
@@ -2578,7 +3025,7 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
                drm_connector_set_tile_property(connector);
        }
        port->has_audio = drm_detect_monitor_audio(edid);
-       drm_dp_put_port(port);
+       drm_dp_mst_topology_put_port(port);
        return edid;
 }
 EXPORT_SYMBOL(drm_dp_mst_get_edid);
@@ -2629,43 +3076,98 @@ static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
 }
 
 /**
- * drm_dp_atomic_find_vcpi_slots() - Find and add vcpi slots to the state
+ * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
  * @state: global atomic state
  * @mgr: MST topology manager for the port
  * @port: port to find vcpi slots for
  * @pbn: bandwidth required for the mode in PBN
  *
- * RETURNS:
- * Total slots in the atomic state assigned for this port or error
+ * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
+ * may have had. Any atomic drivers which support MST must call this function
+ * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
+ * current VCPI allocation for the new state, but only when
+ * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
+ * to ensure compatibility with userspace applications that still use the
+ * legacy modesetting UAPI.
+ *
+ * Allocations set by this function are not checked against the bandwidth
+ * constraints of @mgr until the driver calls drm_dp_mst_atomic_check().
+ *
+ * Additionally, it is OK to call this function multiple times on the same
+ * @port as needed. It is not OK, however, to call this function and
+ * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
+ *
+ * See also:
+ * drm_dp_atomic_release_vcpi_slots()
+ * drm_dp_mst_atomic_check()
+ *
+ * Returns:
+ * Total slots in the atomic state assigned for this port, or a negative error
+ * code if the port no longer exists
  */
 int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
                                  struct drm_dp_mst_topology_mgr *mgr,
                                  struct drm_dp_mst_port *port, int pbn)
 {
        struct drm_dp_mst_topology_state *topology_state;
-       int req_slots;
+       struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
+       int prev_slots, req_slots, ret;
 
        topology_state = drm_atomic_get_mst_topology_state(state, mgr);
        if (IS_ERR(topology_state))
                return PTR_ERR(topology_state);
 
-       port = drm_dp_get_validated_port_ref(mgr, port);
+       port = drm_dp_mst_topology_get_port_validated(mgr, port);
        if (port == NULL)
                return -EINVAL;
-       req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
-       DRM_DEBUG_KMS("vcpi slots req=%d, avail=%d\n",
-                       req_slots, topology_state->avail_slots);
 
-       if (req_slots > topology_state->avail_slots) {
-               drm_dp_put_port(port);
-               return -ENOSPC;
+       /* Find the current allocation for this port, if any */
+       list_for_each_entry(pos, &topology_state->vcpis, next) {
+               if (pos->port == port) {
+                       vcpi = pos;
+                       prev_slots = vcpi->vcpi;
+
+                       /*
+                        * This should never happen, unless the driver tries
+                        * releasing and allocating the same VCPI allocation,
+                        * which is an error
+                        */
+                       if (WARN_ON(!prev_slots)) {
+                               DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
+                                         port);
+                               return -EINVAL;
+                       }
+
+                       break;
+               }
        }
+       if (!vcpi)
+               prev_slots = 0;
+
+       req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
+
+       DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
+                        port->connector->base.id, port->connector->name,
+                        port, prev_slots, req_slots);
 
-       topology_state->avail_slots -= req_slots;
-       DRM_DEBUG_KMS("vcpi slots avail=%d", topology_state->avail_slots);
+       /* Add the new allocation to the state */
+       if (!vcpi) {
+               vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
+               if (!vcpi) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+
+               drm_dp_mst_get_port_malloc(port);
+               vcpi->port = port;
+               list_add(&vcpi->next, &topology_state->vcpis);
+       }
+       vcpi->vcpi = req_slots;
 
-       drm_dp_put_port(port);
-       return req_slots;
+       ret = req_slots;
+out:
+       drm_dp_mst_topology_put_port(port);
+       return ret;
 }
 EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
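/*
 * Hypothetical encoder atomic_check sketch for the API above (the
 * example_* names and fields are assumptions, not part of this patch).
 * The allocation is only refreshed when the mode or connectors changed,
 * matching the documentation above; the returned slot count can be kept
 * for drm_dp_mst_allocate_vcpi() at commit time.
 */
static int example_encoder_atomic_check(struct drm_encoder *encoder,
					struct drm_crtc_state *crtc_state,
					struct drm_connector_state *conn_state)
{
	struct example_connector *conn =
		to_example_connector(conn_state->connector);
	int pbn, slots;

	if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
		return 0;

	pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, 24);
	slots = drm_dp_atomic_find_vcpi_slots(crtc_state->state,
					      conn->mst_mgr, conn->mst_port,
					      pbn);
	if (slots < 0)
		return slots;

	conn->num_slots = slots;
	return 0;
}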
 
@@ -2673,31 +3175,57 @@ EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
  * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
  * @state: global atomic state
  * @mgr: MST topology manager for the port
- * @slots: number of vcpi slots to release
+ * @port: The port to release the VCPI slots from
  *
- * RETURNS:
- * 0 if @slots were added back to &drm_dp_mst_topology_state->avail_slots or
- * negative error code
+ * Releases any VCPI slots that have been allocated to a port in the atomic
+ * state. Any atomic drivers which support MST must call this function in
+ * their &drm_connector_helper_funcs.atomic_check() callback when the
+ * connector will no longer have VCPI allocated (e.g. because its CRTC was
+ * removed) when it had VCPI allocated in the previous atomic state.
+ *
+ * It is OK to call this even if @port has been removed from the system.
+ * Additionally, it is OK to call this function multiple times on the same
+ * @port as needed. It is not OK, however, to call this function and
+ * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
+ * phase.
+ *
+ * See also:
+ * drm_dp_atomic_find_vcpi_slots()
+ * drm_dp_mst_atomic_check()
+ *
+ * Returns:
+ * 0 if all VCPI slots for this port were released, or a negative error code
+ * otherwise
  */
 int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
                                     struct drm_dp_mst_topology_mgr *mgr,
-                                    int slots)
+                                    struct drm_dp_mst_port *port)
 {
        struct drm_dp_mst_topology_state *topology_state;
+       struct drm_dp_vcpi_allocation *pos;
+       bool found = false;
 
        topology_state = drm_atomic_get_mst_topology_state(state, mgr);
        if (IS_ERR(topology_state))
                return PTR_ERR(topology_state);
 
-       /* We cannot rely on port->vcpi.num_slots to update
-        * topology_state->avail_slots as the port may not exist if the parent
-        * branch device was unplugged. This should be fixed by tracking
-        * per-port slot allocation in drm_dp_mst_topology_state instead of
-        * depending on the caller to tell us how many slots to release.
-        */
-       topology_state->avail_slots += slots;
-       DRM_DEBUG_KMS("vcpi slots released=%d, avail=%d\n",
-                       slots, topology_state->avail_slots);
+       list_for_each_entry(pos, &topology_state->vcpis, next) {
+               if (pos->port == port) {
+                       found = true;
+                       break;
+               }
+       }
+       if (WARN_ON(!found)) {
+               DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n",
+                         port, &topology_state->base);
+               return -EINVAL;
+       }
+
+       DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
+       if (pos->vcpi) {
+               drm_dp_mst_put_port_malloc(port);
+               pos->vcpi = 0;
+       }
 
        return 0;
 }
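/*
 * Hypothetical connector atomic_check counterpart (example_* names are
 * assumptions, not part of this patch): release the VCPI allocation when
 * the connector loses its CRTC, as the documentation above requires.
 */
static int example_connector_atomic_check(struct drm_connector *connector,
					  struct drm_connector_state *new_state)
{
	struct drm_atomic_state *state = new_state->state;
	struct drm_connector_state *old_state =
		drm_atomic_get_old_connector_state(state, connector);
	struct example_connector *conn = to_example_connector(connector);

	if (old_state->crtc && !new_state->crtc)
		return drm_dp_atomic_release_vcpi_slots(state, conn->mst_mgr,
							conn->mst_port);
	return 0;
}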
@@ -2715,7 +3243,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
 {
        int ret;
 
-       port = drm_dp_get_validated_port_ref(mgr, port);
+       port = drm_dp_mst_topology_get_port_validated(mgr, port);
        if (!port)
                return false;
 
@@ -2723,9 +3251,10 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
                return false;
 
        if (port->vcpi.vcpi > 0) {
-               DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
+               DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
+                             port->vcpi.vcpi, port->vcpi.pbn, pbn);
                if (pbn == port->vcpi.pbn) {
-                       drm_dp_put_port(port);
+                       drm_dp_mst_topology_put_port(port);
                        return true;
                }
        }
@@ -2733,13 +3262,15 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
        ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
        if (ret) {
                DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
-                               DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
+                             DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
                goto out;
        }
        DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
-                       pbn, port->vcpi.num_slots);
+                     pbn, port->vcpi.num_slots);
 
-       drm_dp_put_port(port);
+       /* Keep port allocated until its payload has been removed */
+       drm_dp_mst_get_port_malloc(port);
+       drm_dp_mst_topology_put_port(port);
        return true;
 out:
        return false;
@@ -2749,12 +3280,12 @@ EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
 int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
 {
        int slots = 0;
-       port = drm_dp_get_validated_port_ref(mgr, port);
+       port = drm_dp_mst_topology_get_port_validated(mgr, port);
        if (!port)
                return slots;
 
        slots = port->vcpi.num_slots;
-       drm_dp_put_port(port);
+       drm_dp_mst_topology_put_port(port);
        return slots;
 }
 EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
@@ -2768,11 +3299,12 @@ EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
  */
 void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
 {
-       port = drm_dp_get_validated_port_ref(mgr, port);
-       if (!port)
-               return;
+       /*
+        * A port with VCPI will remain allocated until its VCPI is
+        * released, no verified ref needed
+        */
+
        port->vcpi.num_slots = 0;
-       drm_dp_put_port(port);
 }
 EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
 
@@ -2781,18 +3313,20 @@ EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
  * @mgr: manager for this port
  * @port: unverified port to deallocate vcpi for
  */
-void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
+                               struct drm_dp_mst_port *port)
 {
-       port = drm_dp_get_validated_port_ref(mgr, port);
-       if (!port)
-               return;
+       /*
+        * A port with VCPI will remain allocated until its VCPI is
+        * released, no verified ref needed
+        */
 
        drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
        port->vcpi.num_slots = 0;
        port->vcpi.pbn = 0;
        port->vcpi.aligned_pbn = 0;
        port->vcpi.vcpi = 0;
-       drm_dp_put_port(port);
+       drm_dp_mst_put_port_malloc(port);
 }
 EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
 
@@ -3076,13 +3610,6 @@ static void drm_dp_tx_work(struct work_struct *work)
        mutex_unlock(&mgr->qlock);
 }
 
-static void drm_dp_free_mst_port(struct kref *kref)
-{
-       struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
-       kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
-       kfree(port);
-}
-
 static void drm_dp_destroy_connector_work(struct work_struct *work)
 {
        struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
@@ -3103,7 +3630,6 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
                list_del(&port->next);
                mutex_unlock(&mgr->destroy_connector_lock);
 
-               kref_init(&port->kref);
                INIT_LIST_HEAD(&port->next);
 
                mgr->cbs->destroy_connector(mgr, port->connector);
@@ -3111,13 +3637,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
                drm_dp_port_teardown_pdt(port, port->pdt);
                port->pdt = DP_PEER_DEVICE_NONE;
 
-               if (!port->input && port->vcpi.vcpi > 0) {
-                       drm_dp_mst_reset_vcpi_slots(mgr, port);
-                       drm_dp_update_payload_part1(mgr);
-                       drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
-               }
-
-               kref_put(&port->kref, drm_dp_free_mst_port);
+               drm_dp_mst_put_port_malloc(port);
                send_hotplug = true;
        }
        if (send_hotplug)
@@ -3127,15 +3647,41 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
 static struct drm_private_state *
 drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
 {
-       struct drm_dp_mst_topology_state *state;
+       struct drm_dp_mst_topology_state *state, *old_state =
+               to_dp_mst_topology_state(obj->state);
+       struct drm_dp_vcpi_allocation *pos, *vcpi;
 
-       state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+       state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
        if (!state)
                return NULL;
 
        __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
 
+       INIT_LIST_HEAD(&state->vcpis);
+
+       list_for_each_entry(pos, &old_state->vcpis, next) {
+               /* Prune leftover freed VCPI allocations */
+               if (!pos->vcpi)
+                       continue;
+
+               vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
+               if (!vcpi)
+                       goto fail;
+
+               drm_dp_mst_get_port_malloc(vcpi->port);
+               list_add(&vcpi->next, &state->vcpis);
+       }
+
        return &state->base;
+
+fail:
+       list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
+               drm_dp_mst_put_port_malloc(pos->port);
+               kfree(pos);
+       }
+       kfree(state);
+
+       return NULL;
 }
 
 static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
@@ -3143,14 +3689,99 @@ static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
 {
        struct drm_dp_mst_topology_state *mst_state =
                to_dp_mst_topology_state(state);
+       struct drm_dp_vcpi_allocation *pos, *tmp;
+
+       list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
+               /* We only keep references to ports with non-zero VCPIs */
+               if (pos->vcpi)
+                       drm_dp_mst_put_port_malloc(pos->port);
+               kfree(pos);
+       }
 
        kfree(mst_state);
 }
 
-static const struct drm_private_state_funcs mst_state_funcs = {
+static inline int
+drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr,
+                                      struct drm_dp_mst_topology_state *mst_state)
+{
+       struct drm_dp_vcpi_allocation *vcpi;
+       int avail_slots = 63, payload_count = 0;
+
+       list_for_each_entry(vcpi, &mst_state->vcpis, next) {
+               /* Releasing VCPI is always OK, even if the port is gone */
+               if (!vcpi->vcpi) {
+                       DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n",
+                                        vcpi->port);
+                       continue;
+               }
+
+               DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n",
+                                vcpi->port, vcpi->vcpi);
+
+               avail_slots -= vcpi->vcpi;
+               if (avail_slots < 0) {
+                       DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
+                                        vcpi->port, mst_state,
+                                        avail_slots + vcpi->vcpi);
+                       return -ENOSPC;
+               }
+
+               if (++payload_count > mgr->max_payloads) {
+                       DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n",
+                                        mgr, mst_state, mgr->max_payloads);
+                       return -EINVAL;
+               }
+       }
+       DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
+                        mgr, mst_state, avail_slots,
+                        63 - avail_slots);
+
+       return 0;
+}
+
+/**
+ * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
+ * atomic update is valid
+ * @state: Pointer to the new &struct drm_dp_mst_topology_state
+ *
+ * Checks the given topology state for an atomic update to ensure that it's
+ * valid. This includes checking whether there's enough bandwidth to support
+ * the new VCPI allocations in the atomic update.
+ *
+ * Any atomic drivers supporting DP MST must make sure to call this after
+ * checking the rest of their state in their
+ * &drm_mode_config_funcs.atomic_check() callback.
+ *
+ * See also:
+ * drm_dp_atomic_find_vcpi_slots()
+ * drm_dp_atomic_release_vcpi_slots()
+ *
+ * Returns:
+ *
+ * 0 if the new state is valid, negative error code otherwise.
+ */
+int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
+{
+       struct drm_dp_mst_topology_mgr *mgr;
+       struct drm_dp_mst_topology_state *mst_state;
+       int i, ret = 0;
+
+       for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
+               ret = drm_dp_mst_atomic_check_topology_state(mgr, mst_state);
+               if (ret)
+                       break;
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_dp_mst_atomic_check);
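
Per the kernel-doc above, atomic drivers are expected to call this at the end of their &drm_mode_config_funcs.atomic_check hook; a minimal sketch (driver name hypothetical):

	static int foo_atomic_check(struct drm_device *dev,
				    struct drm_atomic_state *state)
	{
		int ret;

		ret = drm_atomic_helper_check(dev, state);
		if (ret)
			return ret;

		return drm_dp_mst_atomic_check(state);
	}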
+
+const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
        .atomic_duplicate_state = drm_dp_mst_duplicate_state,
        .atomic_destroy_state = drm_dp_mst_destroy_state,
 };
+EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
 
 /**
  * drm_atomic_get_mst_topology_state: get MST topology state
@@ -3228,13 +3859,11 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
                return -ENOMEM;
 
        mst_state->mgr = mgr;
-
-       /* max. time slots - one slot for MTP header */
-       mst_state->avail_slots = 63;
+       INIT_LIST_HEAD(&mst_state->vcpis);
 
        drm_atomic_private_obj_init(dev, &mgr->base,
                                    &mst_state->base,
-                                   &mst_state_funcs);
+                                   &drm_dp_mst_topology_state_funcs);
 
        return 0;
 }
@@ -3292,7 +3921,7 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs
        struct drm_dp_sideband_msg_tx *txmsg = NULL;
        int ret;
 
-       mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+       mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
        if (!mstb)
                return -EREMOTEIO;
 
@@ -3329,7 +3958,7 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs
        ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
        if (ret > 0) {
 
-               if (txmsg->reply.reply_type == 1) { /* got a NAK back */
+               if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
                        ret = -EREMOTEIO;
                        goto out;
                }
@@ -3342,7 +3971,7 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs
        }
 out:
        kfree(txmsg);
-       drm_dp_put_mst_branch_device(mstb);
+       drm_dp_mst_topology_put_mstb(mstb);
        return ret;
 }
 
index a5fe91b8c3c9b7ed3d1bed59d2abef08af57acc6..381581b01d485e581df8bcebcd6983b01bc8a488 100644 (file)
@@ -264,14 +264,13 @@ void drm_minor_release(struct drm_minor *minor)
  * DOC: driver instance overview
  *
  * A device instance for a drm driver is represented by &struct drm_device. This
- * is allocated with drm_dev_alloc(), usually from bus-specific ->probe()
+ * is initialized with drm_dev_init(), usually from bus-specific ->probe()
  * callbacks implemented by the driver. The driver then needs to initialize all
  * the various subsystems for the drm device like memory management, vblank
 * handling, modesetting support and initial output configuration plus obviously
- * initialize all the corresponding hardware bits. An important part of this is
- * also calling drm_dev_set_unique() to set the userspace-visible unique name of
- * this device instance. Finally when everything is up and running and ready for
- * userspace the device instance can be published using drm_dev_register().
+ * initialize all the corresponding hardware bits. Finally, when everything is up
+ * and running and ready for userspace, the device instance can be published
+ * using drm_dev_register().
  *
 * There is also deprecated support for initializing device instances using
  * bus-specific helpers and the &drm_driver.load callback. But due to
@@ -287,9 +286,6 @@ void drm_minor_release(struct drm_minor *minor)
 * Note that the lifetime rules for a &drm_device instance still have a lot of
  * historical baggage. Hence use the reference counting provided by
  * drm_dev_get() and drm_dev_put() only carefully.
- *
- * It is recommended that drivers embed &struct drm_device into their own device
- * structure, which is supported through drm_dev_init().
  */
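
A minimal sketch of the embedding-plus-drm_dev_init() pattern this paragraph describes, with a hypothetical driver structure and error handling abbreviated:

	struct foo_device {
		struct drm_device drm;	/* must be placed first */
		/* driver-private state follows */
	};

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev)
		return -ENOMEM;

	ret = drm_dev_init(&fdev->drm, &foo_driver, parent);
	if (ret)
		goto err_free;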
 
 /**
@@ -475,6 +471,9 @@ static void drm_fs_inode_free(struct inode *inode)
  * The initial ref-count of the object is 1. Use drm_dev_get() and
  * drm_dev_put() to take and drop further ref-counts.
  *
+ * It is recommended that drivers embed &struct drm_device into their own device
+ * structure.
+ *
  * Drivers that do not want to allocate their own device struct
  * embedding &struct drm_device can call drm_dev_alloc() instead. For drivers
  * that do embed &struct drm_device it must be placed first in the overall
@@ -765,7 +764,7 @@ static void remove_compat_control_link(struct drm_device *dev)
  * @flags: Flags passed to the driver's .load() function
  *
  * Register the DRM device @dev with the system, advertise device to user-space
- * and start normal device operation. @dev must be allocated via drm_dev_alloc()
+ * and start normal device operation. @dev must be initialized via drm_dev_init()
  * previously.
  *
  * Never call this twice on any device!
@@ -877,9 +876,9 @@ EXPORT_SYMBOL(drm_dev_unregister);
  * @dev: device of which to set the unique name
  * @name: unique name
  *
- * Sets the unique name of a DRM device using the specified string. Drivers
- * can use this at driver probe time if the unique name of the devices they
- * drive is static.
+ * Sets the unique name of a DRM device using the specified string. This is
+ * already done by drm_dev_init(); drivers should only override the default
+ * unique name for backwards-compatibility reasons.
  *
  * Return: 0 on success or a negative error code on failure.
  */
index b506e3622b08f64d5e1bb6d9ece2c716580883a8..990b1909f9d721f276e2a257afe8efe9de8376e0 100644 (file)
@@ -3641,6 +3641,20 @@ static bool cea_db_is_hdmi_forum_vsdb(const u8 *db)
        return oui == HDMI_FORUM_IEEE_OUI;
 }
 
+static bool cea_db_is_vcdb(const u8 *db)
+{
+       if (cea_db_tag(db) != USE_EXTENDED_TAG)
+               return false;
+
+       if (cea_db_payload_len(db) != 2)
+               return false;
+
+       if (cea_db_extended_tag(db) != EXT_VIDEO_CAPABILITY_BLOCK)
+               return false;
+
+       return true;
+}
+
 static bool cea_db_is_y420cmdb(const u8 *db)
 {
        if (cea_db_tag(db) != USE_EXTENDED_TAG)
@@ -4223,41 +4237,6 @@ end:
 }
 EXPORT_SYMBOL(drm_detect_monitor_audio);
 
-/**
- * drm_rgb_quant_range_selectable - is RGB quantization range selectable?
- * @edid: EDID block to scan
- *
- * Check whether the monitor reports the RGB quantization range selection
- * as supported. The AVI infoframe can then be used to inform the monitor
- * which quantization range (full or limited) is used.
- *
- * Return: True if the RGB quantization range is selectable, false otherwise.
- */
-bool drm_rgb_quant_range_selectable(struct edid *edid)
-{
-       u8 *edid_ext;
-       int i, start, end;
-
-       edid_ext = drm_find_cea_extension(edid);
-       if (!edid_ext)
-               return false;
-
-       if (cea_db_offsets(edid_ext, &start, &end))
-               return false;
-
-       for_each_cea_db(edid_ext, i, start, end) {
-               if (cea_db_tag(&edid_ext[i]) == USE_EXTENDED_TAG &&
-                   cea_db_payload_len(&edid_ext[i]) == 2 &&
-                   cea_db_extended_tag(&edid_ext[i]) ==
-                       EXT_VIDEO_CAPABILITY_BLOCK) {
-                       DRM_DEBUG_KMS("CEA VCDB 0x%02x\n", edid_ext[i + 2]);
-                       return edid_ext[i + 2] & EDID_CEA_VCDB_QS;
-               }
-       }
-
-       return false;
-}
-EXPORT_SYMBOL(drm_rgb_quant_range_selectable);
 
 /**
  * drm_default_rgb_quant_range - default RGB quantization range
@@ -4278,6 +4257,16 @@ drm_default_rgb_quant_range(const struct drm_display_mode *mode)
 }
 EXPORT_SYMBOL(drm_default_rgb_quant_range);
 
+static void drm_parse_vcdb(struct drm_connector *connector, const u8 *db)
+{
+       struct drm_display_info *info = &connector->display_info;
+
+       DRM_DEBUG_KMS("CEA VCDB 0x%02x\n", db[2]);
+
+       if (db[2] & EDID_CEA_VCDB_QS)
+               info->rgb_quant_range_selectable = true;
+}
+
 static void drm_parse_ycbcr420_deep_color_info(struct drm_connector *connector,
                                               const u8 *db)
 {
@@ -4452,6 +4441,8 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
                        drm_parse_hdmi_forum_vsdb(connector, db);
                if (cea_db_is_y420cmdb(db))
                        drm_parse_y420cmdb_bitmap(connector, db);
+               if (cea_db_is_vcdb(db))
+                       drm_parse_vcdb(connector, db);
        }
 }
 
@@ -4472,6 +4463,7 @@ drm_reset_display_info(struct drm_connector *connector)
        info->max_tmds_clock = 0;
        info->dvi_dual = false;
        info->has_hdmi_infoframe = false;
+       info->rgb_quant_range_selectable = false;
        memset(&info->hdmi, 0, sizeof(info->hdmi));
 
        info->non_desktop = 0;
@@ -4830,19 +4822,32 @@ void drm_set_preferred_mode(struct drm_connector *connector,
 }
 EXPORT_SYMBOL(drm_set_preferred_mode);
 
+static bool is_hdmi2_sink(struct drm_connector *connector)
+{
+       /*
+        * FIXME: sil-sii8620 doesn't have a connector around when
+        * we need one, so we have to be prepared for a NULL connector.
+        */
+       if (!connector)
+               return true;
+
+       return connector->display_info.hdmi.scdc.supported ||
+               connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB420;
+}
+
 /**
  * drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with
  *                                              data from a DRM display mode
  * @frame: HDMI AVI infoframe
+ * @connector: the connector
  * @mode: DRM display mode
- * @is_hdmi2_sink: Sink is HDMI 2.0 compliant
  *
  * Return: 0 on success or a negative error code on failure.
  */
 int
 drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
-                                        const struct drm_display_mode *mode,
-                                        bool is_hdmi2_sink)
+                                        struct drm_connector *connector,
+                                        const struct drm_display_mode *mode)
 {
        enum hdmi_picture_aspect picture_aspect;
        int err;
@@ -4864,7 +4869,7 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
         * HDMI 2.0 VIC range: 1 <= VIC <= 107 (CEA-861-F). So we
         * have to make sure we don't break HDMI 1.4 sinks.
         */
-       if (!is_hdmi2_sink && frame->video_code > 64)
+       if (!is_hdmi2_sink(connector) && frame->video_code > 64)
                frame->video_code = 0;
 
        /*
@@ -4923,22 +4928,18 @@ EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
  * drm_hdmi_avi_infoframe_quant_range() - fill the HDMI AVI infoframe
  *                                        quantization range information
  * @frame: HDMI AVI infoframe
+ * @connector: the connector
  * @mode: DRM display mode
  * @rgb_quant_range: RGB quantization range (Q)
- * @rgb_quant_range_selectable: Sink support selectable RGB quantization range (QS)
- * @is_hdmi2_sink: HDMI 2.0 sink, which has different default recommendations
- *
- * Note that @is_hdmi2_sink can be derived by looking at the
- * &drm_scdc.supported flag stored in &drm_hdmi_info.scdc,
- * &drm_display_info.hdmi, which can be found in &drm_connector.display_info.
  */
 void
 drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
+                                  struct drm_connector *connector,
                                   const struct drm_display_mode *mode,
-                                  enum hdmi_quantization_range rgb_quant_range,
-                                  bool rgb_quant_range_selectable,
-                                  bool is_hdmi2_sink)
+                                  enum hdmi_quantization_range rgb_quant_range)
 {
+       const struct drm_display_info *info = &connector->display_info;
+
        /*
         * CEA-861:
         * "A Source shall not send a non-zero Q value that does not correspond
@@ -4949,7 +4950,7 @@ drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
         * HDMI 2.0 recommends sending non-zero Q when it does match the
         * default RGB quantization range for the mode, even when QS=0.
         */
-       if (rgb_quant_range_selectable ||
+       if (info->rgb_quant_range_selectable ||
            rgb_quant_range == drm_default_rgb_quant_range(mode))
                frame->quantization_range = rgb_quant_range;
        else
@@ -4968,7 +4969,7 @@ drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
         * we limit non-zero YQ to HDMI 2.0 sinks only as HDMI 2.0 is based
         * on CEA-861-F.
         */
-       if (!is_hdmi2_sink ||
+       if (!is_hdmi2_sink(connector) ||
            rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED)
                frame->ycc_quantization_range =
                        HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
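
With both helpers now taking the connector, a driver's AVI infoframe setup reduces to a sketch like the following (variables hypothetical, error checking on the first call omitted):

	struct hdmi_avi_infoframe frame;

	drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
	drm_hdmi_avi_infoframe_quant_range(&frame, connector, mode,
					   HDMI_QUANTIZATION_RANGE_FULL);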
index 5b516615881a0029c54a8fad9db2696f02406b80..5f8074ffe7d9d5fe7ec47c3a3dbc4847ddd0c44e 100644 (file)
  * GNU General Public License for more details.
  */
 
-#include <drm/drmP.h>
-#include <drm/drm_client.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
 #include <drm/drm_framebuffer.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_print.h>
+#include <drm/drm_plane.h>
 #include <linux/module.h>
 
-struct drm_fbdev_cma {
-       struct drm_fb_helper    fb_helper;
-};
-
 /**
  * DOC: framebuffer cma helper functions
  *
@@ -39,16 +32,8 @@ struct drm_fbdev_cma {
  *
  * drm_gem_fb_create() is used in the &drm_mode_config_funcs.fb_create
  * callback function to create a cma backed framebuffer.
- *
- * An fbdev framebuffer backed by cma is also available by calling
- * drm_fb_cma_fbdev_init(). drm_fb_cma_fbdev_fini() tears it down.
  */
 
-static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper)
-{
-       return container_of(helper, struct drm_fbdev_cma, fb_helper);
-}
-
 /**
  * drm_fb_cma_get_gem_obj() - Get CMA GEM object for framebuffer
  * @fb: The framebuffer
@@ -119,121 +104,3 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
        return paddr;
 }
 EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_addr);
-
-/**
- * drm_fb_cma_fbdev_init() - Allocate and initialize fbdev emulation
- * @dev: DRM device
- * @preferred_bpp: Preferred bits per pixel for the device.
- *                 @dev->mode_config.preferred_depth is used if this is zero.
- * @max_conn_count: Maximum number of connectors.
- *                  @dev->mode_config.num_connector is used if this is zero.
- *
- * Returns:
- * Zero on success or negative error code on failure.
- */
-int drm_fb_cma_fbdev_init(struct drm_device *dev, unsigned int preferred_bpp,
-                         unsigned int max_conn_count)
-{
-       struct drm_fbdev_cma *fbdev_cma;
-
-       /* dev->fb_helper will indirectly point to fbdev_cma after this call */
-       fbdev_cma = drm_fbdev_cma_init(dev, preferred_bpp, max_conn_count);
-       return PTR_ERR_OR_ZERO(fbdev_cma);
-}
-EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_init);
-
-/**
- * drm_fb_cma_fbdev_fini() - Teardown fbdev emulation
- * @dev: DRM device
- */
-void drm_fb_cma_fbdev_fini(struct drm_device *dev)
-{
-       if (dev->fb_helper)
-               drm_fbdev_cma_fini(to_fbdev_cma(dev->fb_helper));
-}
-EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_fini);
-
-static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
-       .fb_probe = drm_fb_helper_generic_probe,
-};
-
-/**
- * drm_fbdev_cma_init() - Allocate and initializes a drm_fbdev_cma struct
- * @dev: DRM device
- * @preferred_bpp: Preferred bits per pixel for the device
- * @max_conn_count: Maximum number of connectors
- *
- * Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
- */
-struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
-       unsigned int preferred_bpp, unsigned int max_conn_count)
-{
-       struct drm_fbdev_cma *fbdev_cma;
-       struct drm_fb_helper *fb_helper;
-       int ret;
-
-       fbdev_cma = kzalloc(sizeof(*fbdev_cma), GFP_KERNEL);
-       if (!fbdev_cma)
-               return ERR_PTR(-ENOMEM);
-
-       fb_helper = &fbdev_cma->fb_helper;
-
-       ret = drm_client_init(dev, &fb_helper->client, "fbdev", NULL);
-       if (ret)
-               goto err_free;
-
-       ret = drm_fb_helper_fbdev_setup(dev, fb_helper, &drm_fb_cma_helper_funcs,
-                                       preferred_bpp, max_conn_count);
-       if (ret)
-               goto err_client_put;
-
-       drm_client_add(&fb_helper->client);
-
-       return fbdev_cma;
-
-err_client_put:
-       drm_client_release(&fb_helper->client);
-err_free:
-       kfree(fbdev_cma);
-
-       return ERR_PTR(ret);
-}
-EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
-
-/**
- * drm_fbdev_cma_fini() - Free drm_fbdev_cma struct
- * @fbdev_cma: The drm_fbdev_cma struct
- */
-void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
-{
-       drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper);
-       /* All resources have now been freed by drm_fbdev_fb_destroy() */
-}
-EXPORT_SYMBOL_GPL(drm_fbdev_cma_fini);
-
-/**
- * drm_fbdev_cma_restore_mode() - Restores initial framebuffer mode
- * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
- *
- * This function is usually called from the &drm_driver.lastclose callback.
- */
-void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma)
-{
-       if (fbdev_cma)
-               drm_fb_helper_restore_fbdev_mode_unlocked(&fbdev_cma->fb_helper);
-}
-EXPORT_SYMBOL_GPL(drm_fbdev_cma_restore_mode);
-
-/**
- * drm_fbdev_cma_hotplug_event() - Poll for hotplug events
- * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
- *
- * This function is usually called from the &drm_mode_config.output_poll_changed
- * callback.
- */
-void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma)
-{
-       if (fbdev_cma)
-               drm_fb_helper_hotplug_event(&fbdev_cma->fb_helper);
-}
-EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event);
index d3af098b0922320f2c8be1db7d779ae91538618d..c5c79986f9c565deacde7d780ff190209d6904d7 100644 (file)
@@ -1797,6 +1797,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
        int i;
        struct drm_fb_helper_surface_size sizes;
        int gamma_size = 0;
+       int best_depth = 0;
 
        memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
        sizes.surface_depth = 24;
@@ -1804,7 +1805,10 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
        sizes.fb_width = (u32)-1;
        sizes.fb_height = (u32)-1;
 
-       /* if driver picks 8 or 16 by default use that for both depth/bpp */
+       /*
+        * If the driver picks 8 or 16 by default, use that for both depth/bpp
+        * to begin with
+        */
        if (preferred_bpp != sizes.surface_bpp)
                sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
 
@@ -1839,6 +1843,55 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
                }
        }
 
+       /*
+        * If we run into a situation where, for example, the primary plane
+        * supports RGBA5551 (16 bpp, depth 15) but not RGB565 (16 bpp, depth
+        * 16), we need to scale down the depth of the sizes we request.
+        */
+       for (i = 0; i < fb_helper->crtc_count; i++) {
+               struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
+               struct drm_crtc *crtc = mode_set->crtc;
+               struct drm_plane *plane = crtc->primary;
+               int j;
+
+               DRM_DEBUG("test CRTC %d primary plane\n", i);
+
+               for (j = 0; j < plane->format_count; j++) {
+                       const struct drm_format_info *fmt;
+
+                       fmt = drm_format_info(plane->format_types[j]);
+
+                       /*
+                        * Do not consider YUV or other complicated formats
+                        * for framebuffers. This means only legacy formats
+                        * are supported (fmt->depth is a legacy field) but
+                        * the framebuffer emulation can only deal with such
+                        * formats, specifically RGB/BGA formats.
+                        */
+                       if (fmt->depth == 0)
+                               continue;
+
+                       /* We found a perfect fit, great */
+                       if (fmt->depth == sizes.surface_depth) {
+                               best_depth = fmt->depth;
+                               break;
+                       }
+
+                       /* Skip depths above what we're looking for */
+                       if (fmt->depth > sizes.surface_depth)
+                               continue;
+
+                       /* Best depth found so far */
+                       if (fmt->depth > best_depth)
+                               best_depth = fmt->depth;
+               }
+       }
+       if (sizes.surface_depth != best_depth) {
+               DRM_INFO("requested bpp %d, scaled depth down to %d",
+                        sizes.surface_bpp, best_depth);
+               sizes.surface_depth = best_depth;
+       }
+
        crtc_count = 0;
        for (i = 0; i < fb_helper->crtc_count; i++) {
                struct drm_display_mode *desired_mode;
@@ -2866,7 +2919,7 @@ int drm_fb_helper_fbdev_setup(struct drm_device *dev,
        return 0;
 
 err_drm_fb_helper_fini:
-       drm_fb_helper_fini(fb_helper);
+       drm_fb_helper_fbdev_teardown(dev);
 
        return ret;
 }
@@ -2961,18 +3014,16 @@ static int drm_fbdev_fb_release(struct fb_info *info, int user)
        return 0;
 }
 
-/*
- * fb_ops.fb_destroy is called by the last put_fb_info() call at the end of
- * unregister_framebuffer() or fb_release().
- */
-static void drm_fbdev_fb_destroy(struct fb_info *info)
+static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper)
 {
-       struct drm_fb_helper *fb_helper = info->par;
        struct fb_info *fbi = fb_helper->fbdev;
        struct fb_ops *fbops = NULL;
        void *shadow = NULL;
 
-       if (fbi->fbdefio) {
+       if (!fb_helper->dev)
+               return;
+
+       if (fbi && fbi->fbdefio) {
                fb_deferred_io_cleanup(fbi);
                shadow = fbi->screen_buffer;
                fbops = fbi->fbops;
@@ -2986,15 +3037,22 @@ static void drm_fbdev_fb_destroy(struct fb_info *info)
        }
 
        drm_client_framebuffer_delete(fb_helper->buffer);
-       /*
-        * FIXME:
-        * Remove conditional when all CMA drivers have been moved over to using
-        * drm_fbdev_generic_setup().
-        */
-       if (fb_helper->client.funcs) {
-               drm_client_release(&fb_helper->client);
-               kfree(fb_helper);
-       }
+}
+
+static void drm_fbdev_release(struct drm_fb_helper *fb_helper)
+{
+       drm_fbdev_cleanup(fb_helper);
+       drm_client_release(&fb_helper->client);
+       kfree(fb_helper);
+}
+
+/*
+ * fb_ops.fb_destroy is called by the last put_fb_info() call at the end of
+ * unregister_framebuffer() or fb_release().
+ */
+static void drm_fbdev_fb_destroy(struct fb_info *info)
+{
+       drm_fbdev_release(info->par);
 }
 
 static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
@@ -3047,7 +3105,6 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
        struct drm_framebuffer *fb;
        struct fb_info *fbi;
        u32 format;
-       int ret;
 
        DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
                      sizes->surface_width, sizes->surface_height,
@@ -3064,10 +3121,8 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
        fb = buffer->fb;
 
        fbi = drm_fb_helper_alloc_fbi(fb_helper);
-       if (IS_ERR(fbi)) {
-               ret = PTR_ERR(fbi);
-               goto err_free_buffer;
-       }
+       if (IS_ERR(fbi))
+               return PTR_ERR(fbi);
 
        fbi->par = fb_helper;
        fbi->fbops = &drm_fbdev_fb_ops;
@@ -3098,8 +3153,7 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
                if (!fbops || !shadow) {
                        kfree(fbops);
                        vfree(shadow);
-                       ret = -ENOMEM;
-                       goto err_fb_info_destroy;
+                       return -ENOMEM;
                }
 
                *fbops = *fbi->fbops;
@@ -3111,13 +3165,6 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
        }
 
        return 0;
-
-err_fb_info_destroy:
-       drm_fb_helper_fini(fb_helper);
-err_free_buffer:
-       drm_client_framebuffer_delete(buffer);
-
-       return ret;
 }
 EXPORT_SYMBOL(drm_fb_helper_generic_probe);
 
@@ -3129,25 +3176,16 @@ static void drm_fbdev_client_unregister(struct drm_client_dev *client)
 {
        struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
 
-       if (fb_helper->fbdev) {
-               drm_fb_helper_unregister_fbi(fb_helper);
+       if (fb_helper->fbdev)
                /* drm_fbdev_fb_destroy() takes care of cleanup */
-               return;
-       }
-
-       /* Did drm_fb_helper_fbdev_setup() run? */
-       if (fb_helper->dev)
-               drm_fb_helper_fini(fb_helper);
-
-       drm_client_release(client);
-       kfree(fb_helper);
+               drm_fb_helper_unregister_fbi(fb_helper);
+       else
+               drm_fbdev_release(fb_helper);
 }
 
 static int drm_fbdev_client_restore(struct drm_client_dev *client)
 {
-       struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
-
-       drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+       drm_fb_helper_lastclose(client->dev);
 
        return 0;
 }
@@ -3158,7 +3196,7 @@ static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
        struct drm_device *dev = client->dev;
        int ret;
 
-       /* If drm_fb_helper_fbdev_setup() failed, we only try once */
+       /* Setup is not retried if it has failed */
        if (!fb_helper->dev && fb_helper->funcs)
                return 0;
 
@@ -3170,15 +3208,34 @@ static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
                return 0;
        }
 
-       ret = drm_fb_helper_fbdev_setup(dev, fb_helper, &drm_fb_helper_generic_funcs,
-                                       fb_helper->preferred_bpp, 0);
-       if (ret) {
-               fb_helper->dev = NULL;
-               fb_helper->fbdev = NULL;
-               return ret;
-       }
+       drm_fb_helper_prepare(dev, fb_helper, &drm_fb_helper_generic_funcs);
+
+       ret = drm_fb_helper_init(dev, fb_helper, dev->mode_config.num_connector);
+       if (ret)
+               goto err;
+
+       ret = drm_fb_helper_single_add_all_connectors(fb_helper);
+       if (ret)
+               goto err_cleanup;
+
+       if (!drm_drv_uses_atomic_modeset(dev))
+               drm_helper_disable_unused_functions(dev);
+
+       ret = drm_fb_helper_initial_config(fb_helper, fb_helper->preferred_bpp);
+       if (ret)
+               goto err_cleanup;
 
        return 0;
+
+err_cleanup:
+       drm_fbdev_cleanup(fb_helper);
+err:
+       fb_helper->dev = NULL;
+       fb_helper->fbdev = NULL;
+
+       DRM_DEV_ERROR(dev->dev, "fbdev: Failed to setup generic emulation (ret=%d)\n", ret);
+
+       return ret;
 }
 
 static const struct drm_client_funcs drm_fbdev_client_funcs = {
@@ -3237,6 +3294,10 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
 
        drm_client_add(&fb_helper->client);
 
+       if (!preferred_bpp)
+               preferred_bpp = dev->mode_config.preferred_depth;
+       if (!preferred_bpp)
+               preferred_bpp = 32;
        fb_helper->preferred_bpp = preferred_bpp;
 
        ret = drm_fbdev_client_hotplug(&fb_helper->client);
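
With the fallback added above, a driver can pass 0 and let the helper pick a value; a sketch of the call site, assuming a registered &struct drm_device named drm:

	/* 0 selects mode_config.preferred_depth, or 32 as a last resort */
	drm_fbdev_generic_setup(drm, 0);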
index 12dea16f22a843bd5012ef46b5690e983ed94b12..3da3bf5af40543bca1ff06da352ad038e7257d8e 100644 (file)
@@ -22,6 +22,7 @@
  */
 
 #include <drm/drmP.h>
+#include <drm/drm_util.h>
 #include <drm/drm_flip_work.h>
 
 /**
index fcaea8f50513d845b8d736d7d3df0092dc33d67b..7abcb265a108dd3d2eb3133aa7ef275714adc15e 100644 (file)
@@ -27,6 +27,7 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_uapi.h>
 #include <drm/drm_print.h>
+#include <drm/drm_util.h>
 
 #include "drm_internal.h"
 #include "drm_crtc_internal.h"
index 8b55ece97967f43d0861d6d5f6dfc043f00b9a39..2896ff60552f5d873933ddedbb576df5a9e10b2c 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/shmem_fs.h>
 #include <linux/dma-buf.h>
 #include <linux/mem_encrypt.h>
+#include <linux/pagevec.h>
 #include <drm/drmP.h>
 #include <drm/drm_vma_manager.h>
 #include <drm/drm_gem.h>
@@ -526,6 +527,17 @@ int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
 }
 EXPORT_SYMBOL(drm_gem_create_mmap_offset);
 
+/*
+ * Move pages to the appropriate LRU list and release the pagevec, decrementing
+ * the reference count of those pages.
+ */
+static void drm_gem_check_release_pagevec(struct pagevec *pvec)
+{
+       check_move_unevictable_pages(pvec);
+       __pagevec_release(pvec);
+       cond_resched();
+}
+
 /**
  * drm_gem_get_pages - helper to allocate backing pages for a GEM object
  * from shmem
@@ -551,6 +563,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 {
        struct address_space *mapping;
        struct page *p, **pages;
+       struct pagevec pvec;
        int i, npages;
 
        /* This is the shared memory object that backs the GEM resource */
@@ -568,6 +581,8 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
        if (pages == NULL)
                return ERR_PTR(-ENOMEM);
 
+       mapping_set_unevictable(mapping);
+
        for (i = 0; i < npages; i++) {
                p = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(p))
@@ -586,8 +601,14 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
        return pages;
 
 fail:
-       while (i--)
-               put_page(pages[i]);
+       mapping_clear_unevictable(mapping);
+       pagevec_init(&pvec);
+       while (i--) {
+               if (!pagevec_add(&pvec, pages[i]))
+                       drm_gem_check_release_pagevec(&pvec);
+       }
+       if (pagevec_count(&pvec))
+               drm_gem_check_release_pagevec(&pvec);
 
        kvfree(pages);
        return ERR_CAST(p);
@@ -605,6 +626,11 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
                bool dirty, bool accessed)
 {
        int i, npages;
+       struct address_space *mapping;
+       struct pagevec pvec;
+
+       mapping = file_inode(obj->filp)->i_mapping;
+       mapping_clear_unevictable(mapping);
 
        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
@@ -614,6 +640,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
 
        npages = obj->size >> PAGE_SHIFT;
 
+       pagevec_init(&pvec);
        for (i = 0; i < npages; i++) {
                if (dirty)
                        set_page_dirty(pages[i]);
@@ -622,8 +649,11 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
                        mark_page_accessed(pages[i]);
 
                /* Undo the reference we took when populating the table */
-               put_page(pages[i]);
+               if (!pagevec_add(&pvec, pages[i]))
+                       drm_gem_check_release_pagevec(&pvec);
        }
+       if (pagevec_count(&pvec))
+               drm_gem_check_release_pagevec(&pvec);
 
        kvfree(pages);
 }
index acb466d25afc28b545240149710d425ba094e9af..65edb1ccb185f01840e2395436ab51159ee67dce 100644 (file)
@@ -17,6 +17,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_uapi.h>
+#include <drm/drm_damage_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_framebuffer.h>
@@ -136,10 +137,9 @@ EXPORT_SYMBOL(drm_gem_fb_create_handle);
  * @mode_cmd: Metadata from the userspace framebuffer creation request
  * @funcs: vtable to be used for the new framebuffer object
  *
- * This can be used to set &drm_framebuffer_funcs for drivers that need the
- * &drm_framebuffer_funcs.dirty callback. Use drm_gem_fb_create() if you don't
- * need to change &drm_framebuffer_funcs.
- * The function does buffer size validation.
+ * This function can be used to set &drm_framebuffer_funcs for drivers that need
+ * custom framebuffer callbacks. Use drm_gem_fb_create() if you don't need to
+ * change &drm_framebuffer_funcs. The function does buffer size validation.
  *
  * Returns:
  * Pointer to a &drm_framebuffer on success or an error pointer on failure.
@@ -215,8 +215,8 @@ static const struct drm_framebuffer_funcs drm_gem_fb_funcs = {
  *
  * If your hardware has special alignment or pitch requirements these should be
  * checked before calling this function. The function does buffer size
- * validation. Use drm_gem_fb_create_with_funcs() if you need to set
- * &drm_framebuffer_funcs.dirty.
+ * validation. Use drm_gem_fb_create_with_dirty() if you need framebuffer
+ * flushing.
  *
  * Drivers can use this as their &drm_mode_config_funcs.fb_create callback.
  * The ADDFB2 IOCTL calls into this callback.
@@ -233,6 +233,44 @@ drm_gem_fb_create(struct drm_device *dev, struct drm_file *file,
 }
 EXPORT_SYMBOL_GPL(drm_gem_fb_create);
 
+static const struct drm_framebuffer_funcs drm_gem_fb_funcs_dirtyfb = {
+       .destroy        = drm_gem_fb_destroy,
+       .create_handle  = drm_gem_fb_create_handle,
+       .dirty          = drm_atomic_helper_dirtyfb,
+};
+
+/**
+ * drm_gem_fb_create_with_dirty() - Helper function for the
+ *                       &drm_mode_config_funcs.fb_create callback
+ * @dev: DRM device
+ * @file: DRM file that holds the GEM handle(s) backing the framebuffer
+ * @mode_cmd: Metadata from the userspace framebuffer creation request
+ *
+ * This function creates a new framebuffer object described by
+ * &drm_mode_fb_cmd2. This description includes handles for the buffer(s)
+ * backing the framebuffer. drm_atomic_helper_dirtyfb() is used for the dirty
+ * callback, which provides framebuffer flushing through the atomic machinery. Use
+ * drm_gem_fb_create() if you don't need the dirty callback.
+ * The function does buffer size validation.
+ *
+ * Drivers should also call drm_plane_enable_fb_damage_clips() on all planes
+ * so that userspace can use damage clips with the ATOMIC IOCTL as well.
+ *
+ * Drivers can use this as their &drm_mode_config_funcs.fb_create callback.
+ * The ADDFB2 IOCTL calls into this callback.
+ *
+ * Returns:
+ * Pointer to a &drm_framebuffer on success or an error pointer on failure.
+ */
+struct drm_framebuffer *
+drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file,
+                            const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+       return drm_gem_fb_create_with_funcs(dev, file, mode_cmd,
+                                           &drm_gem_fb_funcs_dirtyfb);
+}
+EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_dirty);
+
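A sketch of how a driver might wire this helper up, per the kernel-doc above (struct name hypothetical, helper choices illustrative):

	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
		.fb_create	= drm_gem_fb_create_with_dirty,
		.atomic_check	= drm_atomic_helper_check,
		.atomic_commit	= drm_atomic_helper_commit,
	};
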
 /**
  * drm_gem_fb_prepare_fb() - Prepare a GEM backed framebuffer
  * @plane: Plane
index 45a07652fa00e886bc11ae6dcf5766439ee0901a..9bd8908d5fd83e4e43546bc83f07c788d49f3c37 100644 (file)
@@ -103,9 +103,6 @@ int drm_irq_install(struct drm_device *dev, int irq)
        int ret;
        unsigned long sh_flags = 0;
 
-       if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
-               return -EOPNOTSUPP;
-
        if (irq == 0)
                return -EINVAL;
 
@@ -123,8 +120,8 @@ int drm_irq_install(struct drm_device *dev, int irq)
        if (dev->driver->irq_preinstall)
                dev->driver->irq_preinstall(dev);
 
-       /* Install handler */
-       if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
+       /* PCI devices require shared interrupts. */
+       if (dev->pdev)
                sh_flags = IRQF_SHARED;
 
        ret = request_irq(irq, dev->driver->irq_handler,
@@ -174,9 +171,6 @@ int drm_irq_uninstall(struct drm_device *dev)
        bool irq_enabled;
        int i;
 
-       if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
-               return -EOPNOTSUPP;
-
        irq_enabled = dev->irq_enabled;
        dev->irq_enabled = false;
 
index 24a7504365596c32c60333ad04e25ba1c26306bf..adce9a26bac944bcbe9bec875bb95778b0ac7cc8 100644 (file)
@@ -71,11 +71,6 @@ struct drm_display_mode *drm_mode_create(struct drm_device *dev)
        if (!nmode)
                return NULL;
 
-       if (drm_mode_object_add(dev, &nmode->base, DRM_MODE_OBJECT_MODE)) {
-               kfree(nmode);
-               return NULL;
-       }
-
        return nmode;
 }
 EXPORT_SYMBOL(drm_mode_create);
@@ -92,8 +87,6 @@ void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
        if (!mode)
                return;
 
-       drm_mode_object_unregister(dev, &mode->base);
-
        kfree(mode);
 }
 EXPORT_SYMBOL(drm_mode_destroy);
@@ -911,11 +904,9 @@ EXPORT_SYMBOL(drm_mode_set_crtcinfo);
  */
 void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src)
 {
-       int id = dst->base.id;
        struct list_head head = dst->head;
 
        *dst = *src;
-       dst->base.id = id;
        dst->head = head;
 }
 EXPORT_SYMBOL(drm_mode_copy);
index 9150fa385bba7d212dce1b1da55d677d47f1e765..890eee07892d89626cd9982da2944bdfabede868 100644 (file)
  */
 
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_modeset_helper.h>
 #include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
 
 /**
  * DOC: aux kms helpers
index 2763a5ec845b0c14bff58b8aed4d8cb58d80d0a9..f2f71d71494a276411ff4d29e4884bbd8b558a99 100644 (file)
@@ -217,9 +217,11 @@ int drm_of_encoder_active_endpoint(struct device_node *node,
 }
 EXPORT_SYMBOL_GPL(drm_of_encoder_active_endpoint);
 
-/*
+/**
  * drm_of_find_panel_or_bridge - return connected panel or bridge device
  * @np: device tree node containing encoder output ports
+ * @port: port in the device tree node
+ * @endpoint: endpoint in the device tree node
  * @panel: pointer to hold returned drm_panel
  * @bridge: pointer to hold returned drm_bridge
  *
index c33f95e08e1b8bc8c6e987a95bc4ff011215618f..dbd5b873e8f2f268a6c3b07b70a82d4d7906dc66 100644 (file)
@@ -36,6 +36,9 @@ static LIST_HEAD(panel_list);
  * The DRM panel helpers allow drivers to register panel objects with a
  * central registry and provide functions to retrieve those panels in display
  * drivers.
+ *
+ * For easy integration into drivers using the &drm_bridge infrastructure,
+ * please take a look at drm_panel_bridge_add() and devm_drm_panel_bridge_add().
  */
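
A sketch of the bridge integration mentioned above, assuming a probe that has already looked up its &struct drm_panel (connector type illustrative):

	struct drm_bridge *bridge;

	bridge = devm_drm_panel_bridge_add(dev, panel, DRM_MODE_CONNECTOR_DPI);
	if (IS_ERR(bridge))
		return PTR_ERR(bridge);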
 
 /**
index 5f650d8fc66b7a9514db4be47b937871f28d900f..4cfb56893b7f2e50deed2334ec904389a2c33f43 100644 (file)
@@ -220,6 +220,9 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
                        format_modifier_count++;
        }
 
+       if (format_modifier_count)
+               config->allow_fb_modifiers = true;
+
        plane->modifier_count = format_modifier_count;
        plane->modifiers = kmalloc_array(format_modifier_count,
                                         sizeof(format_modifiers[0]),
index a1bb157bfdfaeb9bad32c5f39b200970547ec039..6fd08e04b3231ea13169fe7c97d891f821d97e77 100644 (file)
 #include <drm/drm_client.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_fourcc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_probe_helper.h>
 
 #include "drm_crtc_helper_internal.h"
 
index 917812448d1ba954b7a84ff6a33ba04a988ef2fb..a32f14cd7398c06e97cad5ad749d183a9cba734b 100644 (file)
@@ -10,8 +10,8 @@
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drm_simple_kms_helper.h>
 #include <linux/slab.h>
 
index 98e0911759217578a0cfaf9e299026a2eca4237b..cde71ee95a8f04ebd1b6ec25703092360b3fa9fc 100644 (file)
@@ -105,13 +105,20 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe,
        write_sequnlock(&vblank->seqlock);
 }
 
+static u32 drm_max_vblank_count(struct drm_device *dev, unsigned int pipe)
+{
+       struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+
+       return vblank->max_vblank_count ?: dev->max_vblank_count;
+}
+
 /*
  * "No hw counter" fallback implementation of .get_vblank_counter() hook,
  * if there is no useable hardware frame counter available.
  */
 static u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe)
 {
-       WARN_ON_ONCE(dev->max_vblank_count != 0);
+       WARN_ON_ONCE(drm_max_vblank_count(dev, pipe) != 0);
        return 0;
 }
 
@@ -198,6 +205,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
        ktime_t t_vblank;
        int count = DRM_TIMESTAMP_MAXRETRIES;
        int framedur_ns = vblank->framedur_ns;
+       u32 max_vblank_count = drm_max_vblank_count(dev, pipe);
 
        /*
         * Interrupts were disabled prior to this call, so deal with counter
@@ -216,9 +224,9 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
                rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, in_vblank_irq);
        } while (cur_vblank != __get_vblank_counter(dev, pipe) && --count > 0);
 
-       if (dev->max_vblank_count != 0) {
+       if (max_vblank_count) {
                /* trust the hw counter when it's around */
-               diff = (cur_vblank - vblank->last) & dev->max_vblank_count;
+               diff = (cur_vblank - vblank->last) & max_vblank_count;
        } else if (rc && framedur_ns) {
                u64 diff_ns = ktime_to_ns(ktime_sub(t_vblank, vblank->time));
 
@@ -1204,6 +1212,37 @@ void drm_crtc_vblank_reset(struct drm_crtc *crtc)
 }
 EXPORT_SYMBOL(drm_crtc_vblank_reset);
 
+/**
+ * drm_crtc_set_max_vblank_count - configure the hw max vblank counter value
+ * @crtc: CRTC in question
+ * @max_vblank_count: max hardware vblank counter value
+ *
+ * Update the maximum hardware vblank counter value for @crtc
+ * at runtime. Useful for hardware where the operation of the
+ * hardware vblank counter depends on the currently active
+ * display configuration.
+ *
+ * For example, if the hardware vblank counter does not work
+ * when a specific connector is active, the maximum can be set
+ * to zero. And when that specific connector isn't active the
+ * maximum can again be set to the appropriate non-zero value.
+ *
+ * If used, must be called before drm_vblank_on().
+ */
+void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc,
+                                  u32 max_vblank_count)
+{
+       struct drm_device *dev = crtc->dev;
+       unsigned int pipe = drm_crtc_index(crtc);
+       struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+
+       WARN_ON(dev->max_vblank_count);
+       WARN_ON(!READ_ONCE(vblank->inmodeset));
+
+       vblank->max_vblank_count = max_vblank_count;
+}
+EXPORT_SYMBOL(drm_crtc_set_max_vblank_count);
+
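A sketch of the intended use, assuming a hypothetical predicate for when the hardware counter is unusable; note the documented requirement to call this before drm_crtc_vblank_on():

	if (foo_hw_counter_broken(crtc_state))
		drm_crtc_set_max_vblank_count(crtc, 0);	/* fall back to timestamps */
	else
		drm_crtc_set_max_vblank_count(crtc, 0xffffff);

	drm_crtc_vblank_on(crtc);
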
 /**
  * drm_crtc_vblank_on - enable vblank events on a CRTC
  * @crtc: CRTC in question
index 4bf698de599696f0462ee50af4f7b5ff59fc199b..a6a7ded37ef1d0678bd8fb1acad77012c841ddb8 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/mm_types.h>
 
 #include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem.h>
 #include <drm/etnaviv_drm.h>
index c8449ae4f4feda409f29c700f361b0560eaac981..471242a5e5809aacb62ace95e73cedd037dcc46f 100644 (file)
 #include <video/videomode.h>
 
 #include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
 
 #include <drm/bridge/analogix_dp.h>
 #include <drm/exynos_drm.h>
index 2696289ecc78f204fb504f24c4f897694acb41df..96ee83a798c4cffd3f984e12f72cc91abecfd740 100644 (file)
  */
 
 #include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_encoder.h>
+#include <drm/drm_probe_helper.h>
 
 #include "exynos_drm_crtc.h"
 #include "exynos_drm_drv.h"
index 2f0babb67c5104be6f1e617d5baa57a64d74499c..ae425c9a3f7b8faf831a9567854b7061ebd63fea 100644 (file)
@@ -11,9 +11,9 @@
 */
 
 #include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_panel.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
 
 #include <linux/of_graph.h>
 #include <linux/regulator/consumer.h>
index 2c75e789b2a7e9faf80aa9c6eac3ef5b65fa98e6..e1ef9dc9ebf3f13e07f8a3c0be0bcfdf94e4a01f 100644 (file)
@@ -15,8 +15,8 @@
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include <linux/component.h>
 
index d81e62ae286aea79d39757ecb2233608b905d75f..a4253dd55f86dd04bf9d22a40fba2615f5bfae01 100644 (file)
 #include <asm/unaligned.h>
 
 #include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_mipi_dsi.h>
 #include <drm/drm_panel.h>
-#include <drm/drm_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include <linux/clk.h>
 #include <linux/gpio/consumer.h>
index 31eb538a44ae1950c7f94fa9b994abb154b525df..1f11ab0f8e9daffdaecc04f6b1ace3d799652145 100644 (file)
  */
 
 #include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <uapi/drm/exynos_drm.h>
 
 #include "exynos_drm_drv.h"
index ce9604ca8041d7b03a783e1cad743a7480074cc3..c30dd88cdb257655b90b40813ccdcbfe714a8b3e 100644 (file)
@@ -15,7 +15,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_fb_helper.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/exynos_drm.h>
 
 #include <linux/console.h>
@@ -88,7 +88,6 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
        }
 
        fbi->par = helper;
-       fbi->flags = FBINFO_FLAG_DEFAULT;
        fbi->fbops = &exynos_drm_fb_ops;
 
        drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
index 2fd299a58297edd559b2b5928a63833b3c2622df..dd02e8a323ef524488bf68b7ff01469f44184aa5 100644 (file)
@@ -246,8 +246,8 @@ already_disabled:
 }
 
 static void mic_mode_set(struct drm_bridge *bridge,
-                       struct drm_display_mode *mode,
-                       struct drm_display_mode *adjusted_mode)
+                        const struct drm_display_mode *mode,
+                        const struct drm_display_mode *adjusted_mode)
 {
        struct exynos_mic *mic = bridge->driver_private;
 
index 8d67b2a54be3b66f68e008d07a6203140bf2f7f4..05abfed6f7f899d919cd4152c085d33473a154f7 100644 (file)
@@ -356,6 +356,11 @@ static int rotator_runtime_resume(struct device *dev)
 }
 #endif
 
+static const struct drm_exynos_ipp_limit rotator_s5pv210_rbg888_limits[] = {
+       { IPP_SIZE_LIMIT(BUFFER, .h = { 8, SZ_16K }, .v = { 8, SZ_16K }) },
+       { IPP_SIZE_LIMIT(AREA, .h.align = 2, .v.align = 2) },
+};
+
 static const struct drm_exynos_ipp_limit rotator_4210_rbg888_limits[] = {
        { IPP_SIZE_LIMIT(BUFFER, .h = { 8, SZ_16K }, .v = { 8, SZ_16K }) },
        { IPP_SIZE_LIMIT(AREA, .h.align = 4, .v.align = 4) },
@@ -371,6 +376,11 @@ static const struct drm_exynos_ipp_limit rotator_5250_rbg888_limits[] = {
        { IPP_SIZE_LIMIT(AREA, .h.align = 2, .v.align = 2) },
 };
 
+static const struct drm_exynos_ipp_limit rotator_s5pv210_yuv_limits[] = {
+       { IPP_SIZE_LIMIT(BUFFER, .h = { 32, SZ_64K }, .v = { 32, SZ_64K }) },
+       { IPP_SIZE_LIMIT(AREA, .h.align = 8, .v.align = 8) },
+};
+
 static const struct drm_exynos_ipp_limit rotator_4210_yuv_limits[] = {
        { IPP_SIZE_LIMIT(BUFFER, .h = { 32, SZ_64K }, .v = { 32, SZ_64K }) },
        { IPP_SIZE_LIMIT(AREA, .h.align = 8, .v.align = 8) },
@@ -381,6 +391,11 @@ static const struct drm_exynos_ipp_limit rotator_4412_yuv_limits[] = {
        { IPP_SIZE_LIMIT(AREA, .h.align = 8, .v.align = 8) },
 };
 
+static const struct exynos_drm_ipp_formats rotator_s5pv210_formats[] = {
+       { IPP_SRCDST_FORMAT(XRGB8888, rotator_s5pv210_rbg888_limits) },
+       { IPP_SRCDST_FORMAT(NV12, rotator_s5pv210_yuv_limits) },
+};
+
 static const struct exynos_drm_ipp_formats rotator_4210_formats[] = {
        { IPP_SRCDST_FORMAT(XRGB8888, rotator_4210_rbg888_limits) },
        { IPP_SRCDST_FORMAT(NV12, rotator_4210_yuv_limits) },
@@ -396,6 +411,11 @@ static const struct exynos_drm_ipp_formats rotator_5250_formats[] = {
        { IPP_SRCDST_FORMAT(NV12, rotator_4412_yuv_limits) },
 };
 
+static const struct rot_variant rotator_s5pv210_data = {
+       .formats = rotator_s5pv210_formats,
+       .num_formats = ARRAY_SIZE(rotator_s5pv210_formats),
+};
+
 static const struct rot_variant rotator_4210_data = {
        .formats = rotator_4210_formats,
        .num_formats = ARRAY_SIZE(rotator_4210_formats),
@@ -413,6 +433,9 @@ static const struct rot_variant rotator_5250_data = {
 
 static const struct of_device_id exynos_rotator_match[] = {
        {
+               .compatible = "samsung,s5pv210-rotator",
+               .data = &rotator_s5pv210_data,
+       }, {
                .compatible = "samsung,exynos4210-rotator",
                .data = &rotator_4210_data,
        }, {
index 71270efa64f3f35e7d983488b13202192936b245..ed1dd1aec902ae8200484eda7a2a53fbe6531e06 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Copyright (C) 2017 Samsung Electronics Co.Ltd
  * Author:
- *     Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *     Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
index 19697c1362d8facf536a55485c45b2b2aaae6a85..29f4c1932aedb572cecf5e92e6775d151a98d77c 100644 (file)
@@ -19,9 +19,9 @@
 
 #include <drm/exynos_drm.h>
 
-#include <drm/drm_edid.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_probe_helper.h>
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_crtc.h"
index 2092a650df7d56b1262fc2f09036eb88ec62ce38..8e2c02fc66e87fc2725a9cc116f82a0ce662ab3f 100644 (file)
@@ -15,9 +15,9 @@
  */
 
 #include <drm/drmP.h>
-#include <drm/drm_edid.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_probe_helper.h>
 
 #include "regs-hdmi.h"
 
@@ -819,7 +819,8 @@ static void hdmi_reg_infoframes(struct hdmi_context *hdata)
                return;
        }
 
-       ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi, m, false);
+       ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi,
+                                                      &hdata->connector, m);
        if (!ret)
                ret = hdmi_avi_infoframe_pack(&frm.avi, buf, sizeof(buf));
        if (ret > 0) {
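
The hunk above tracks an API change: drm_hdmi_avi_infoframe_from_display_mode() dropped its bool is_hdmi2 argument in favour of the connector, letting the helper derive HDMI 2.0 and quantization details from connector state. The new prototype, as best it can be inferred from the call sites in this merge:

	/* declared in <drm/drm_edid.h>; the bool argument is gone */
	int drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
						     struct drm_connector *connector,
						     const struct drm_display_mode *mode);
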
index fc7ccad75e74ace8402cad50d47fc14928e466c3..512a2baced11650acec70d4e8393929b0e77722b 100644 (file)
@@ -2,7 +2,7 @@
  *
  * Copyright (c) 2017 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com/
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
  *
  * Register definition file for Samsung scaler driver
  *
index 18afc94e4dff89416bcdd16180a573e2f29a1470..bf256971063d15b7d7ed5f65fc4bbdbfc50115f6 100644 (file)
@@ -16,7 +16,7 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <video/videomode.h>
 
 #include "fsl_dcu_drm_crtc.h"
index ceddc3e29258fdd8ed9b5346e94dcafae4b0d280..dfc73aade32581c5306b74a87f00cc50affb233d 100644 (file)
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_modeset_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "fsl_dcu_drm_crtc.h"
 #include "fsl_dcu_drm_drv.h"
@@ -137,7 +137,7 @@ static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg)
 DEFINE_DRM_GEM_CMA_FOPS(fsl_dcu_drm_fops);
 
 static struct drm_driver fsl_dcu_drm_driver = {
-       .driver_features        = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET
+       .driver_features        = DRIVER_GEM | DRIVER_MODESET
                                | DRIVER_PRIME | DRIVER_ATOMIC,
        .load                   = fsl_dcu_load,
        .unload                 = fsl_dcu_unload,
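
DRIVER_HAVE_IRQ only means something to the legacy drm_irq_install() midlayer; a KMS driver that wires up its own handler (fsl_dcu_drm_irq, visible in the hunk above) gets nothing from the flag, hence the removal here and in the gma500 and hibmc hunks below. A hedged sketch of the direct style (the devm call site is an assumption, not taken from this driver):

	/* assumption: request the interrupt directly, no DRM irq midlayer */
	ret = devm_request_irq(dev->dev, irq, fsl_dcu_drm_irq, 0, "fsl-dcu", dev);
	if (ret < 0)
		return ret;
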
index ddc68e476a4db81ed32259cb9b27e8d58f74e2d6..e447f7d0c304127f7255c8b2d46a936160222c6e 100644 (file)
@@ -11,9 +11,9 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "fsl_dcu_drm_crtc.h"
 #include "fsl_dcu_drm_drv.h"
index 9554b245746ebaef31d698c702903e7c40980289..2a9e8a82c06a13d6ba4afabe409ab9f2c8ed880f 100644 (file)
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "fsl_dcu_drm_drv.h"
 #include "fsl_dcu_drm_plane.h"
index 2298ed2a9e1c02f8e3c37d7f6761a23f0a470ca3..0a3a62b082403bc67f178038b870ce0414ae2a75 100644 (file)
@@ -14,9 +14,9 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
 
 #include "fsl_dcu_drm_drv.h"
 #include "fsl_tcon.h"
index adefae58b5fcd6e09d513aed236c12d220e406c5..c934b3df1f8112c886c8f79cbd13606ea34d6cf5 100644 (file)
@@ -405,7 +405,6 @@ static int psbfb_create(struct psb_fbdev *fbdev,
        drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
        strcpy(info->fix.id, "psbdrmfb");
 
-       info->flags = FBINFO_DEFAULT;
        if (dev_priv->ops->accel_2d && pitch_lines > 8) /* 2D engine */
                info->fbops = &psbfb_ops;
        else if (gtt_roll) {    /* GTT rolling seems best */
index ac32ab5aa002705c69d0636fff461951fd4c283d..eefaf4daff2b83c1bfb196710e67c144bb94bb83 100644 (file)
@@ -468,8 +468,7 @@ static const struct file_operations psb_gem_fops = {
 };
 
 static struct drm_driver driver = {
-       .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \
-                          DRIVER_MODESET | DRIVER_GEM,
+       .driver_features = DRIVER_MODESET | DRIVER_GEM,
        .load = psb_driver_load,
        .unload = psb_driver_unload,
        .lastclose = drm_fb_helper_lastclose,
index e05e5399af2db518959143992fbe21dc16dd0c11..8280a923b916ac4d12311808ebae63b0ac30c067 100644 (file)
@@ -24,6 +24,7 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_encoder.h>
+#include <drm/drm_probe_helper.h>
 #include <linux/gpio.h>
 #include "gma_display.h"
 
index a956545774a39225702ac05243769ed7cc3e76af..9316b724e7a2684a1337cd9eeff6a98599805cdd 100644 (file)
@@ -18,8 +18,8 @@
 
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "hibmc_drm_drv.h"
 #include "hibmc_drm_regs.h"
index 68c0c297b3a53b2961fa46a55e197d14d3dc389d..8ed94fcd42a7fdc0a0bf0aaa0fd9ea05df98e9a8 100644 (file)
@@ -20,7 +20,7 @@
 #include <linux/module.h>
 
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "hibmc_drm_drv.h"
 #include "hibmc_drm_regs.h"
@@ -56,8 +56,7 @@ static irqreturn_t hibmc_drm_interrupt(int irq, void *arg)
 }
 
 static struct drm_driver hibmc_driver = {
-       .driver_features        = DRIVER_GEM | DRIVER_MODESET |
-                                 DRIVER_ATOMIC | DRIVER_HAVE_IRQ,
+       .driver_features        = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
        .fops                   = &hibmc_fops,
        .name                   = "hibmc",
        .date                   = "20160828",
index edcca17615001cb4ba37e7ac3601d731189f36c8..de9d7cc97e449d7e791f78c6022a9f600bea5ff8 100644 (file)
@@ -17,8 +17,8 @@
  */
 
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "hibmc_drm_drv.h"
 
index 744956cea7496afd8ec2672bd953eaf67ef661c3..d2cf7317930a50f7986008baf70790f88e0cf50f 100644 (file)
@@ -17,7 +17,7 @@
  */
 
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "hibmc_drm_drv.h"
 #include "hibmc_drm_regs.h"
index b4c7af3ab6ae422b1d56a0e4cf0f2cee4c783445..3d6c45097f5199494c896ca38761f3fa093b3815 100644 (file)
 
 #include <linux/clk.h>
 #include <linux/component.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
 
-#include <drm/drm_of.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_encoder_slave.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_device.h>
+#include <drm/drm_encoder_slave.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
 
 #include "dw_dsi_reg.h"
 
index bb774202a5a1bc41d01dc6f8369db37822149f48..73611a92d96c82854d2f43282723131edf564d94 100644 (file)
 #include <linux/reset.h>
 
 #include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_crtc.h>
 #include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "kirin_drm_drv.h"
 #include "kirin_ade_reg.h"
index e6a62d5a00a3af0164a113ffc971304a80f5cbb5..7cb7c042b93ff3f1332eb0e0878611079a7b44e2 100644 (file)
 #include <linux/of_graph.h>
 
 #include <drm/drmP.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
 
 #include "kirin_drm_drv.h"
 
@@ -33,32 +34,15 @@ static struct kirin_dc_ops *dc_ops;
 
 static int kirin_drm_kms_cleanup(struct drm_device *dev)
 {
-       struct kirin_drm_private *priv = dev->dev_private;
-
-       if (priv->fbdev) {
-               drm_fbdev_cma_fini(priv->fbdev);
-               priv->fbdev = NULL;
-       }
-
        drm_kms_helper_poll_fini(dev);
        dc_ops->cleanup(to_platform_device(dev->dev));
        drm_mode_config_cleanup(dev);
-       devm_kfree(dev->dev, priv);
-       dev->dev_private = NULL;
 
        return 0;
 }
 
-static void kirin_fbdev_output_poll_changed(struct drm_device *dev)
-{
-       struct kirin_drm_private *priv = dev->dev_private;
-
-       drm_fbdev_cma_hotplug_event(priv->fbdev);
-}
-
 static const struct drm_mode_config_funcs kirin_drm_mode_config_funcs = {
        .fb_create = drm_gem_fb_create,
-       .output_poll_changed = kirin_fbdev_output_poll_changed,
        .atomic_check = drm_atomic_helper_check,
        .atomic_commit = drm_atomic_helper_commit,
 };
@@ -76,14 +60,8 @@ static void kirin_drm_mode_config_init(struct drm_device *dev)
 
 static int kirin_drm_kms_init(struct drm_device *dev)
 {
-       struct kirin_drm_private *priv;
        int ret;
 
-       priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
-       if (!priv)
-               return -ENOMEM;
-
-       dev->dev_private = priv;
        dev_set_drvdata(dev->dev, dev);
 
        /* dev->mode_config initialization */
@@ -117,26 +95,14 @@ static int kirin_drm_kms_init(struct drm_device *dev)
        /* init kms poll for handling hpd */
        drm_kms_helper_poll_init(dev);
 
-       priv->fbdev = drm_fbdev_cma_init(dev, 32,
-                                        dev->mode_config.num_connector);
-
-       if (IS_ERR(priv->fbdev)) {
-               DRM_ERROR("failed to initialize fbdev.\n");
-               ret = PTR_ERR(priv->fbdev);
-               goto err_cleanup_poll;
-       }
        return 0;
 
-err_cleanup_poll:
-       drm_kms_helper_poll_fini(dev);
 err_unbind_all:
        component_unbind_all(dev->dev, dev);
 err_dc_cleanup:
        dc_ops->cleanup(to_platform_device(dev->dev));
 err_mode_config_cleanup:
        drm_mode_config_cleanup(dev);
-       devm_kfree(dev->dev, priv);
-       dev->dev_private = NULL;
 
        return ret;
 }
@@ -199,6 +165,8 @@ static int kirin_drm_bind(struct device *dev)
        if (ret)
                goto err_kms_cleanup;
 
+       drm_fbdev_generic_setup(drm_dev, 32);
+
        return 0;
 
 err_kms_cleanup:
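
kirin drops its private fbdev-cma state entirely: no struct kirin_drm_private, no output_poll_changed hook, no error unwinding for the emulation. The generic fbdev client replaces all of it with one call after registration and tears itself down on unregister. A minimal sketch of the resulting pattern (error handling elided; 32 is the preferred bpp):

	ret = drm_dev_register(drm_dev, 0);
	if (ret)
		goto err_kms_cleanup;

	/* generic fbdev emulation; cleaned up automatically on unregister */
	drm_fbdev_generic_setup(drm_dev, 32);
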
index 56cb62df065cfbfc9245bc876c3e0e38b301b7c7..ad027d1cc826697ecd222de74badea79e1211034 100644 (file)
@@ -19,10 +19,6 @@ struct kirin_dc_ops {
        void (*cleanup)(struct platform_device *pdev);
 };
 
-struct kirin_drm_private {
-       struct drm_fbdev_cma *fbdev;
-};
-
 extern const struct kirin_dc_ops ade_dc_ops;
 
 #endif /* __KIRIN_DRM_DRV_H__ */
index 544a8a2d3562c2bd6f173be3e5cee0d80f9c2696..b91e48d2190d216e1f73363443b30ca5ed66bba2 100644 (file)
@@ -359,10 +359,10 @@ static int ch7006_encoder_set_property(struct drm_encoder *encoder,
        if (modes_changed) {
                drm_helper_probe_single_connector_modes(connector, 0, 0);
 
-               /* Disable the crtc to ensure a full modeset is
-                * performed whenever it's turned on again. */
                if (crtc)
-                       drm_crtc_force_disable(crtc);
+                       drm_crtc_helper_set_mode(crtc, &crtc->mode,
+                                                crtc->x, crtc->y,
+                                                crtc->primary->fb);
        }
 
        return 0;
index dc6414af5d79eec1e38a35e6b045a1e8f282f048..b6e091935977698c5961cb6bf264e9fb66a6c989 100644 (file)
@@ -30,6 +30,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_encoder_slave.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/i2c/ch7006.h>
 
 typedef int64_t fixed;
index c52d7a3af786e6c1c0bac8a0a8ab40f72bb998e2..878ba8d06ce2c485d977ea0ef85b46acb132770a 100644 (file)
@@ -27,8 +27,8 @@
 #include <linux/module.h>
 
 #include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_encoder_slave.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/i2c/sil164.h>
 
 struct sil164_priv {
index a7c39f39793ff2c2ce152e3cba3c3036513a7f19..7f34601bb5155719eac38a729cbd4a70d5c12f4c 100644 (file)
@@ -26,9 +26,9 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/i2c/tda998x.h>
 
 #include <media/cec-notifier.h>
@@ -845,11 +845,12 @@ static int tda998x_write_aif(struct tda998x_priv *priv,
 }
 
 static void
-tda998x_write_avi(struct tda998x_priv *priv, struct drm_display_mode *mode)
+tda998x_write_avi(struct tda998x_priv *priv, const struct drm_display_mode *mode)
 {
        union hdmi_infoframe frame;
 
-       drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode, false);
+       drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+                                                &priv->connector, mode);
        frame.avi.quantization_range = HDMI_QUANTIZATION_RANGE_FULL;
 
        tda998x_write_if(priv, DIP_IF_FLAGS_IF2, REG_IF2_HB0, &frame);
@@ -1122,7 +1123,6 @@ static void tda998x_connector_destroy(struct drm_connector *connector)
 }
 
 static const struct drm_connector_funcs tda998x_connector_funcs = {
-       .dpms = drm_helper_connector_dpms,
        .reset = drm_atomic_helper_connector_reset,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .detect = tda998x_connector_detect,
@@ -1339,8 +1339,8 @@ static void tda998x_bridge_disable(struct drm_bridge *bridge)
 }
 
 static void tda998x_bridge_mode_set(struct drm_bridge *bridge,
-                                   struct drm_display_mode *mode,
-                                   struct drm_display_mode *adjusted_mode)
+                                   const struct drm_display_mode *mode,
+                                   const struct drm_display_mode *adjusted_mode)
 {
        struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
        unsigned long tmds_clock;
index 9e36ffb5eb7cdd049140ab9e4eeccc53c1020813..ad4d71161dda0f430f0346249ad33a89c857daa8 100644 (file)
@@ -21,11 +21,11 @@ config DRM_I915_DEBUG
         select DEBUG_FS
         select PREEMPT_COUNT
         select I2C_CHARDEV
+        select STACKDEPOT
         select DRM_DP_AUX_CHARDEV
         select X86_MSR # used by igt/pm_rpm
         select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
         select DRM_DEBUG_MM if DRM=y
-        select STACKDEPOT if DRM=y # for DRM_DEBUG_MM
        select DRM_DEBUG_SELFTEST
        select SW_SYNC # signaling validation framework (igt/syncobj*)
        select DRM_I915_SW_FENCE_DEBUG_OBJECTS
@@ -173,6 +173,7 @@ config DRM_I915_DEBUG_RUNTIME_PM
        bool "Enable extra state checking for runtime PM"
        depends on DRM_I915
        default n
+       select STACKDEPOT
        help
          Choose this option to turn on extra state checking for the
          runtime PM functionality. This may introduce overhead during
index 19b5fe5016bf6617394da4d0e795d62e8d49a857..1787e1299b1b2a8ec6647473aa64c4c03efd71f0 100644 (file)
@@ -22,6 +22,7 @@ subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
 subdir-ccflags-y += $(call cc-disable-warning, sign-compare)
 subdir-ccflags-y += $(call cc-disable-warning, sometimes-uninitialized)
 subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides)
+subdir-ccflags-y += $(call cc-disable-warning, uninitialized)
 subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
 
 # Fine grained warnings disable
@@ -40,9 +41,10 @@ i915-y := i915_drv.o \
          i915_mm.o \
          i915_params.o \
          i915_pci.o \
-          i915_suspend.o \
-         i915_syncmap.o \
+         i915_reset.o \
+         i915_suspend.o \
          i915_sw_fence.o \
+         i915_syncmap.o \
          i915_sysfs.o \
          intel_csr.o \
          intel_device_info.o \
@@ -55,7 +57,9 @@ i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o intel_pipe_crc.o
 i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
 
 # GEM code
-i915-y += i915_cmd_parser.o \
+i915-y += \
+         i915_active.o \
+         i915_cmd_parser.o \
          i915_gem_batch_pool.o \
          i915_gem_clflush.o \
          i915_gem_context.o \
@@ -166,6 +170,7 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \
        selftests/i915_random.o \
        selftests/i915_selftest.o \
        selftests/igt_flush_test.o \
+       selftests/igt_live_test.o \
        selftests/igt_reset.o \
        selftests/igt_spinner.o
 
@@ -198,3 +203,4 @@ endif
 i915-y += intel_lpe_audio.o
 
 obj-$(CONFIG_DRM_I915) += i915.o
+obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o
index 5e6a3013da49645ebf6ea0b1487fc383fc43d3b1..16e0345b711fb3cc782125aa418e804d2e09ab50 100644 (file)
@@ -24,7 +24,6 @@
 #define _INTEL_DVO_H
 
 #include <linux/i2c.h>
-#include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 #include "intel_drv.h"
 
index b016dc753db96774a57770b7af4d93cf66ecf997..271fb46d4dd0df3fbce52eb93097562027e86aed 100644 (file)
@@ -7,4 +7,3 @@ GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
 
 ccflags-y                              += -I$(src) -I$(src)/$(GVT_DIR)
 i915-y                                 += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
-obj-$(CONFIG_DRM_I915_GVT_KVMGT)       += $(GVT_DIR)/kvmgt.o
index 359d37d5c958c6b258053ba62804762f75d00087..1fa2f65c3cd16f38651f14a40e12f2d58e82af3d 100644 (file)
@@ -180,7 +180,7 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
        }
        mutex_unlock(&dev_priv->drm.struct_mutex);
 
-       intel_runtime_pm_put(dev_priv);
+       intel_runtime_pm_put_unchecked(dev_priv);
 }
 
 static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
@@ -206,7 +206,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
        _clear_vgpu_fence(vgpu);
 
        mutex_unlock(&dev_priv->drm.struct_mutex);
-       intel_runtime_pm_put(dev_priv);
+       intel_runtime_pm_put_unchecked(dev_priv);
        return 0;
 out_free_fence:
        gvt_vgpu_err("Failed to alloc fences\n");
@@ -219,7 +219,7 @@ out_free_fence:
                vgpu->fence.regs[i] = NULL;
        }
        mutex_unlock(&dev_priv->drm.struct_mutex);
-       intel_runtime_pm_put(dev_priv);
+       intel_runtime_pm_put_unchecked(dev_priv);
        return -ENOSPC;
 }
 
@@ -317,7 +317,7 @@ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
 
        intel_runtime_pm_get(dev_priv);
        _clear_vgpu_fence(vgpu);
-       intel_runtime_pm_put(dev_priv);
+       intel_runtime_pm_put_unchecked(dev_priv);
 }
 
 /**
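
The intel_runtime_pm_put() to intel_runtime_pm_put_unchecked() conversions through GVT track an i915-wide change: the get side now hands back an intel_wakeref_t cookie and the checked put consumes it, so call sites that do not track a cookie were renamed to _unchecked. A hedged sketch of the checked pairing (names inferred from the rename in this cycle, not from these hunks):

	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(dev_priv);
	/* ... touch hardware ... */
	intel_runtime_pm_put(dev_priv, wakeref);	/* checked: consumes the cookie */
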
index 77ae634eb11c9e709751b9391a8f392ad9632385..35b4ec3f7618b887e5661d0d652cca99b6ed02c6 100644 (file)
@@ -55,10 +55,10 @@ struct sub_op_bits {
        int low;
 };
 struct decode_info {
-       char *name;
+       const char *name;
        int op_len;
        int nr_sub_op;
-       struct sub_op_bits *sub_op;
+       const struct sub_op_bits *sub_op;
 };
 
 #define   MAX_CMD_BUDGET                       0x7fffffff
@@ -375,7 +375,7 @@ typedef int (*parser_cmd_handler)(struct parser_exec_state *s);
 #define ADDR_FIX_5(x1, x2, x3, x4, x5)  (ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5))
 
 struct cmd_info {
-       char *name;
+       const char *name;
        u32 opcode;
 
 #define F_LEN_MASK     (1U<<0)
@@ -399,10 +399,10 @@ struct cmd_info {
 #define R_VECS (1 << VECS)
 #define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
        /* rings that support this cmd: BLT/RCS/VCS/VECS */
-       uint16_t rings;
+       u16 rings;
 
        /* devices that support this cmd: SNB/IVB/HSW/... */
-       uint16_t devices;
+       u16 devices;
 
        /* which DWords are address that need fix up.
         * bit 0 means a 32-bit non address operand in command
@@ -412,20 +412,20 @@ struct cmd_info {
         * No matter the address length, each address only takes
         * one bit in the bitmap.
         */
-       uint16_t addr_bitmap;
+       u16 addr_bitmap;
 
        /* flag == F_LEN_CONST : command length
         * flag == F_LEN_VAR : length bias bits
         * Note: length is in DWord
         */
-       uint8_t len;
+       u8 len;
 
        parser_cmd_handler handler;
 };
 
 struct cmd_entry {
        struct hlist_node hlist;
-       struct cmd_info *info;
+       const struct cmd_info *info;
 };
 
 enum {
@@ -474,7 +474,7 @@ struct parser_exec_state {
        int saved_buf_addr_type;
        bool is_ctx_wa;
 
-       struct cmd_info *info;
+       const struct cmd_info *info;
 
        struct intel_vgpu_workload *workload;
 };
@@ -485,12 +485,12 @@ struct parser_exec_state {
 static unsigned long bypass_scan_mask = 0;
 
 /* ring ALL, type = 0 */
-static struct sub_op_bits sub_op_mi[] = {
+static const struct sub_op_bits sub_op_mi[] = {
        {31, 29},
        {28, 23},
 };
 
-static struct decode_info decode_info_mi = {
+static const struct decode_info decode_info_mi = {
        "MI",
        OP_LEN_MI,
        ARRAY_SIZE(sub_op_mi),
@@ -498,12 +498,12 @@ static struct decode_info decode_info_mi = {
 };
 
 /* ring RCS, command type 2 */
-static struct sub_op_bits sub_op_2d[] = {
+static const struct sub_op_bits sub_op_2d[] = {
        {31, 29},
        {28, 22},
 };
 
-static struct decode_info decode_info_2d = {
+static const struct decode_info decode_info_2d = {
        "2D",
        OP_LEN_2D,
        ARRAY_SIZE(sub_op_2d),
@@ -511,14 +511,14 @@ static struct decode_info decode_info_2d = {
 };
 
 /* ring RCS, command type 3 */
-static struct sub_op_bits sub_op_3d_media[] = {
+static const struct sub_op_bits sub_op_3d_media[] = {
        {31, 29},
        {28, 27},
        {26, 24},
        {23, 16},
 };
 
-static struct decode_info decode_info_3d_media = {
+static const struct decode_info decode_info_3d_media = {
        "3D_Media",
        OP_LEN_3D_MEDIA,
        ARRAY_SIZE(sub_op_3d_media),
@@ -526,7 +526,7 @@ static struct decode_info decode_info_3d_media = {
 };
 
 /* ring VCS, command type 3 */
-static struct sub_op_bits sub_op_mfx_vc[] = {
+static const struct sub_op_bits sub_op_mfx_vc[] = {
        {31, 29},
        {28, 27},
        {26, 24},
@@ -534,7 +534,7 @@ static struct sub_op_bits sub_op_mfx_vc[] = {
        {20, 16},
 };
 
-static struct decode_info decode_info_mfx_vc = {
+static const struct decode_info decode_info_mfx_vc = {
        "MFX_VC",
        OP_LEN_MFX_VC,
        ARRAY_SIZE(sub_op_mfx_vc),
@@ -542,7 +542,7 @@ static struct decode_info decode_info_mfx_vc = {
 };
 
 /* ring VECS, command type 3 */
-static struct sub_op_bits sub_op_vebox[] = {
+static const struct sub_op_bits sub_op_vebox[] = {
        {31, 29},
        {28, 27},
        {26, 24},
@@ -550,14 +550,14 @@ static struct sub_op_bits sub_op_vebox[] = {
        {20, 16},
 };
 
-static struct decode_info decode_info_vebox = {
+static const struct decode_info decode_info_vebox = {
        "VEBOX",
        OP_LEN_VEBOX,
        ARRAY_SIZE(sub_op_vebox),
        sub_op_vebox,
 };
 
-static struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
+static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
        [RCS] = {
                &decode_info_mi,
                NULL,
@@ -616,7 +616,7 @@ static struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
 
 static inline u32 get_opcode(u32 cmd, int ring_id)
 {
-       struct decode_info *d_info;
+       const struct decode_info *d_info;
 
        d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
        if (d_info == NULL)
@@ -625,7 +625,7 @@ static inline u32 get_opcode(u32 cmd, int ring_id)
        return cmd >> (32 - d_info->op_len);
 }
 
-static inline struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
+static inline const struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
                unsigned int opcode, int ring_id)
 {
        struct cmd_entry *e;
@@ -638,7 +638,7 @@ static inline struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
        return NULL;
 }
 
-static inline struct cmd_info *get_cmd_info(struct intel_gvt *gvt,
+static inline const struct cmd_info *get_cmd_info(struct intel_gvt *gvt,
                u32 cmd, int ring_id)
 {
        u32 opcode;
@@ -657,7 +657,7 @@ static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
 
 static inline void print_opcode(u32 cmd, int ring_id)
 {
-       struct decode_info *d_info;
+       const struct decode_info *d_info;
        int i;
 
        d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
@@ -776,7 +776,7 @@ static inline int ip_gma_advance(struct parser_exec_state *s,
        return 0;
 }
 
-static inline int get_cmd_length(struct cmd_info *info, u32 cmd)
+static inline int get_cmd_length(const struct cmd_info *info, u32 cmd)
 {
        if ((info->flag & F_LEN_MASK) == F_LEN_CONST)
                return info->len;
@@ -901,7 +901,8 @@ static int cmd_reg_handler(struct parser_exec_state *s,
         * It's good enough to support initializing mmio by lri command in
         * vgpu inhibit context on KBL.
         */
-       if (IS_KABYLAKE(s->vgpu->gvt->dev_priv) &&
+       if ((IS_KABYLAKE(s->vgpu->gvt->dev_priv)
+               || IS_COFFEELAKE(s->vgpu->gvt->dev_priv)) &&
                        intel_gvt_mmio_is_in_ctx(gvt, offset) &&
                        !strncmp(cmd, "lri", 3)) {
                intel_gvt_hypervisor_read_gpa(s->vgpu,
@@ -1280,9 +1281,7 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s,
        if (!info->async_flip)
                return 0;
 
-       if (IS_SKYLAKE(dev_priv)
-               || IS_KABYLAKE(dev_priv)
-               || IS_BROXTON(dev_priv)) {
+       if (INTEL_GEN(dev_priv) >= 9) {
                stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
                tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
                                GENMASK(12, 10)) >> 10;
@@ -1310,9 +1309,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
 
        set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
                      info->surf_val << 12);
-       if (IS_SKYLAKE(dev_priv)
-               || IS_KABYLAKE(dev_priv)
-               || IS_BROXTON(dev_priv)) {
+       if (INTEL_GEN(dev_priv) >= 9) {
                set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0),
                              info->stride_val);
                set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10),
@@ -1336,9 +1333,7 @@ static int decode_mi_display_flip(struct parser_exec_state *s,
 
        if (IS_BROADWELL(dev_priv))
                return gen8_decode_mi_display_flip(s, info);
-       if (IS_SKYLAKE(dev_priv)
-               || IS_KABYLAKE(dev_priv)
-               || IS_BROXTON(dev_priv))
+       if (INTEL_GEN(dev_priv) >= 9)
                return skl_decode_mi_display_flip(s, info);
 
        return -ENODEV;
@@ -1643,8 +1638,8 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
 static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
 {
        unsigned long gma = 0;
-       struct cmd_info *info;
-       uint32_t cmd_len = 0;
+       const struct cmd_info *info;
+       u32 cmd_len = 0;
        bool bb_end = false;
        struct intel_vgpu *vgpu = s->vgpu;
        u32 cmd;
@@ -1842,7 +1837,7 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
 
 static int mi_noop_index;
 
-static struct cmd_info cmd_info[] = {
+static const struct cmd_info cmd_info[] = {
        {"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
 
        {"MI_SET_PREDICATE", OP_MI_SET_PREDICATE, F_LEN_CONST, R_ALL, D_ALL,
@@ -2521,7 +2516,7 @@ static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
 static int cmd_parser_exec(struct parser_exec_state *s)
 {
        struct intel_vgpu *vgpu = s->vgpu;
-       struct cmd_info *info;
+       const struct cmd_info *info;
        u32 cmd;
        int ret = 0;
 
@@ -2683,7 +2678,7 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
                                        I915_GTT_PAGE_SIZE)))
                return -EINVAL;
 
-       ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(uint32_t);
+       ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(u32);
        ring_size = round_up(wa_ctx->indirect_ctx.size + CACHELINE_BYTES,
                        PAGE_SIZE);
        gma_head = wa_ctx->indirect_ctx.guest_gma;
@@ -2850,7 +2845,7 @@ put_obj:
 
 static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
-       uint32_t per_ctx_start[CACHELINE_DWORDS] = {0};
+       u32 per_ctx_start[CACHELINE_DWORDS] = {0};
        unsigned char *bb_start_sva;
 
        if (!wa_ctx->per_ctx.valid)
@@ -2895,10 +2890,10 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
        return 0;
 }
 
-static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
+static const struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
                unsigned int opcode, unsigned long rings)
 {
-       struct cmd_info *info = NULL;
+       const struct cmd_info *info = NULL;
        unsigned int ring;
 
        for_each_set_bit(ring, &rings, I915_NUM_ENGINES) {
@@ -2913,7 +2908,7 @@ static int init_cmd_table(struct intel_gvt *gvt)
 {
        int i;
        struct cmd_entry *e;
-       struct cmd_info *info;
+       const struct cmd_info *info;
        unsigned int gen_type;
 
        gen_type = intel_gvt_get_device_type(gvt);
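
The command tables are pure data, so this series marks decode_info, sub_op_bits and cmd_info const end to end. The decode itself is untouched: each decode_info lists {hi, low} bit windows, and get_opcode() takes the top op_len bits of the first dword. A sketch of the window extraction, matching the sub_op_val() helper the hunk headers reference (reconstructed, so treat the exact body as an assumption):

	/* extract cmd[hi:low]; for MI, {31,29} is the command type, {28,23} the opcode */
	static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
	{
		return (cmd >> low) & ((1U << (hi - low + 1)) - 1);
	}
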
index df1e14145747ca9666dae9c8bdf7273cc69c4b46..035479e273beca866575c4bef70438583029d2df 100644 (file)
@@ -198,7 +198,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
                        SDE_PORTC_HOTPLUG_CPT |
                        SDE_PORTD_HOTPLUG_CPT);
 
-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+           IS_COFFEELAKE(dev_priv)) {
                vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
                                SDE_PORTE_HOTPLUG_SPT);
                vgpu_vreg_t(vgpu, SKL_FUSE_STATUS) |=
@@ -273,7 +274,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
                vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
        }
 
-       if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
+       if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+            IS_COFFEELAKE(dev_priv)) &&
                        intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
                vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
        }
@@ -340,6 +342,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
        port->dpcd->data_valid = true;
        port->dpcd->data[DPCD_SINK_COUNT] = 0x1;
        port->type = type;
+       port->id = resolution;
 
        emulate_monitor_status_change(vgpu);
 
@@ -442,6 +445,36 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
        mutex_unlock(&gvt->lock);
 }
 
+/**
+ * intel_vgpu_emulate_hotplug - trigger hotplug event for vGPU
+ * @vgpu: a vGPU
+ * @connected: link state
+ *
+ * This function is used to trigger a hotplug interrupt for the vGPU
+ *
+ */
+void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
+{
+       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+       /* TODO: add support for more platforms */
+       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+               if (connected) {
+                       vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
+                               SFUSE_STRAP_DDID_DETECTED;
+                       vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
+               } else {
+                       vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
+                               ~SFUSE_STRAP_DDID_DETECTED;
+                       vgpu_vreg_t(vgpu, SDEISR) &= ~SDE_PORTD_HOTPLUG_CPT;
+               }
+               vgpu_vreg_t(vgpu, SDEIIR) |= SDE_PORTD_HOTPLUG_CPT;
+               vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+                               PORTD_HOTPLUG_STATUS_MASK;
+               intel_vgpu_trigger_virtual_event(vgpu, DP_D_HOTPLUG);
+       }
+}
+
 /**
  * intel_vgpu_clean_display - clean vGPU virtual display emulation
  * @vgpu: a vGPU
@@ -453,7 +486,8 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
 {
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+           IS_COFFEELAKE(dev_priv))
                clean_virtual_dp_monitor(vgpu, PORT_D);
        else
                clean_virtual_dp_monitor(vgpu, PORT_B);
@@ -476,7 +510,8 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution)
 
        intel_vgpu_init_i2c_edid(vgpu);
 
-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+           IS_COFFEELAKE(dev_priv))
                return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D,
                                                resolution);
        else
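
Coffee Lake reuses the SKL/KBL virtual-display paths wholesale, so the platform checks above simply gain IS_COFFEELAKE(). The new intel_vgpu_emulate_hotplug() entry point is driven from the vfio EDID region added further down; a one-line usage sketch (the call appears verbatim in the kvmgt hunks below):

	/* on a guest link-state write, flip the virtual hotplug line */
	intel_gvt_ops->emulate_hotplug(vgpu, /* connected = */ true);
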
index ea7c1c525b8c36f8b57ed6bec9d05d6e13432cce..a87f33e6a23ca46708a6dd321c02aac70104a76e 100644 (file)
@@ -146,18 +146,19 @@ enum intel_vgpu_port_type {
        GVT_PORT_MAX
 };
 
+enum intel_vgpu_edid {
+       GVT_EDID_1024_768,
+       GVT_EDID_1920_1200,
+       GVT_EDID_NUM,
+};
+
 struct intel_vgpu_port {
        /* per display EDID information */
        struct intel_vgpu_edid_data *edid;
        /* per display DPCD information */
        struct intel_vgpu_dpcd_data *dpcd;
        int type;
-};
-
-enum intel_vgpu_edid {
-       GVT_EDID_1024_768,
-       GVT_EDID_1920_1200,
-       GVT_EDID_NUM,
+       enum intel_vgpu_edid id;
 };
 
 static inline char *vgpu_edid_str(enum intel_vgpu_edid id)
@@ -172,6 +173,30 @@ static inline char *vgpu_edid_str(enum intel_vgpu_edid id)
        }
 }
 
+static inline unsigned int vgpu_edid_xres(enum intel_vgpu_edid id)
+{
+       switch (id) {
+       case GVT_EDID_1024_768:
+               return 1024;
+       case GVT_EDID_1920_1200:
+               return 1920;
+       default:
+               return 0;
+       }
+}
+
+static inline unsigned int vgpu_edid_yres(enum intel_vgpu_edid id)
+{
+       switch (id) {
+       case GVT_EDID_1024_768:
+               return 768;
+       case GVT_EDID_1920_1200:
+               return 1200;
+       default:
+               return 0;
+       }
+}
+
 void intel_gvt_emulate_vblank(struct intel_gvt *gvt);
 void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt);
 
index 51ed99a37803310e2d9b7e84882ae7df97e3b59c..3e7e2b80c8579017cecdda478bc6166e1f46e061 100644 (file)
@@ -29,7 +29,6 @@
  */
 
 #include <linux/dma-buf.h>
-#include <drm/drmP.h>
 #include <linux/vfio.h>
 
 #include "i915_drv.h"
@@ -164,9 +163,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
 
        obj->read_domains = I915_GEM_DOMAIN_GTT;
        obj->write_domain = 0;
-       if (IS_SKYLAKE(dev_priv)
-               || IS_KABYLAKE(dev_priv)
-               || IS_BROXTON(dev_priv)) {
+       if (INTEL_GEN(dev_priv) >= 9) {
                unsigned int tiling_mode = 0;
                unsigned int stride = 0;
 
index 5d4bb35bb889e932c6b2905ac06e4495d57b0cb4..1fe6124918f1bcc2a97b39e7a811548a7a611e6d 100644 (file)
@@ -77,16 +77,32 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
        return chr;
 }
 
+static inline int cnp_get_port_from_gmbus0(u32 gmbus0)
+{
+       int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
+       int port = -EINVAL;
+
+       if (port_select == GMBUS_PIN_1_BXT)
+               port = PORT_B;
+       else if (port_select == GMBUS_PIN_2_BXT)
+               port = PORT_C;
+       else if (port_select == GMBUS_PIN_3_BXT)
+               port = PORT_D;
+       else if (port_select == GMBUS_PIN_4_CNP)
+               port = PORT_E;
+       return port;
+}
+
 static inline int bxt_get_port_from_gmbus0(u32 gmbus0)
 {
        int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
        int port = -EINVAL;
 
-       if (port_select == 1)
+       if (port_select == GMBUS_PIN_1_BXT)
                port = PORT_B;
-       else if (port_select == 2)
+       else if (port_select == GMBUS_PIN_2_BXT)
                port = PORT_C;
-       else if (port_select == 3)
+       else if (port_select == GMBUS_PIN_3_BXT)
                port = PORT_D;
        return port;
 }
@@ -96,13 +112,13 @@ static inline int get_port_from_gmbus0(u32 gmbus0)
        int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
        int port = -EINVAL;
 
-       if (port_select == 2)
+       if (port_select == GMBUS_PIN_VGADDC)
                port = PORT_E;
-       else if (port_select == 4)
+       else if (port_select == GMBUS_PIN_DPC)
                port = PORT_C;
-       else if (port_select == 5)
+       else if (port_select == GMBUS_PIN_DPB)
                port = PORT_B;
-       else if (port_select == 6)
+       else if (port_select == GMBUS_PIN_DPD)
                port = PORT_D;
        return port;
 }
@@ -133,6 +149,8 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
 
        if (IS_BROXTON(dev_priv))
                port = bxt_get_port_from_gmbus0(pin_select);
+       else if (IS_COFFEELAKE(dev_priv))
+               port = cnp_get_port_from_gmbus0(pin_select);
        else
                port = get_port_from_gmbus0(pin_select);
        if (WARN_ON(port < 0))
index 85e6736f0a327742329dc9ee3bc8cf2dce294ed3..65e847392aea788f2feb17c381f75f0defc62709 100644 (file)
@@ -151,9 +151,7 @@ static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
        u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
        u32 stride = stride_reg;
 
-       if (IS_SKYLAKE(dev_priv)
-               || IS_KABYLAKE(dev_priv)
-               || IS_BROXTON(dev_priv)) {
+       if (INTEL_GEN(dev_priv) >= 9) {
                switch (tiled) {
                case PLANE_CTL_TILED_LINEAR:
                        stride = stride_reg * 64;
@@ -217,9 +215,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
        if (!plane->enabled)
                return -ENODEV;
 
-       if (IS_SKYLAKE(dev_priv)
-               || IS_KABYLAKE(dev_priv)
-               || IS_BROXTON(dev_priv)) {
+       if (INTEL_GEN(dev_priv) >= 9) {
                plane->tiled = val & PLANE_CTL_TILED_MASK;
                fmt = skl_format_to_drm(
                        val & PLANE_CTL_FORMAT_MASK,
@@ -260,9 +256,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
        }
 
        plane->stride = intel_vgpu_get_stride(vgpu, pipe, plane->tiled,
-               (IS_SKYLAKE(dev_priv)
-               || IS_KABYLAKE(dev_priv)
-               || IS_BROXTON(dev_priv)) ?
+               (INTEL_GEN(dev_priv) >= 9) ?
                        (_PRI_PLANE_STRIDE_MASK >> 6) :
                                _PRI_PLANE_STRIDE_MASK, plane->bpp);
 
index 733a2a0d0c3096ce56feb0a3a00e56e23f4db8ae..43f4242062dd8e613ca4003d743e7ea769bc7f65 100644 (file)
@@ -185,54 +185,9 @@ static const struct intel_gvt_ops intel_gvt_ops = {
        .vgpu_query_plane = intel_vgpu_query_plane,
        .vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
        .write_protect_handler = intel_vgpu_page_track_handler,
+       .emulate_hotplug = intel_vgpu_emulate_hotplug,
 };
 
-/**
- * intel_gvt_init_host - Load MPT modules and detect if we're running in host
- *
- * This function is called at the driver loading stage. If failed to find a
- * loadable MPT module or detect currently we're running in a VM, then GVT-g
- * will be disabled
- *
- * Returns:
- * Zero on success, negative error code if failed.
- *
- */
-int intel_gvt_init_host(void)
-{
-       if (intel_gvt_host.initialized)
-               return 0;
-
-       /* Xen DOM U */
-       if (xen_domain() && !xen_initial_domain())
-               return -ENODEV;
-
-       /* Try to load MPT modules for hypervisors */
-       if (xen_initial_domain()) {
-               /* In Xen dom0 */
-               intel_gvt_host.mpt = try_then_request_module(
-                               symbol_get(xengt_mpt), "xengt");
-               intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_XEN;
-       } else {
-#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
-               /* not in Xen. Try KVMGT */
-               intel_gvt_host.mpt = try_then_request_module(
-                               symbol_get(kvmgt_mpt), "kvmgt");
-               intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_KVM;
-#endif
-       }
-
-       /* Fail to load MPT modules - bail out */
-       if (!intel_gvt_host.mpt)
-               return -EINVAL;
-
-       gvt_dbg_core("Running with hypervisor %s in host mode\n",
-                       supported_hypervisors[intel_gvt_host.hypervisor_type]);
-
-       intel_gvt_host.initialized = true;
-       return 0;
-}
-
 static void init_device_info(struct intel_gvt *gvt)
 {
        struct intel_gvt_device_info *info = &gvt->device_info;
@@ -316,7 +271,6 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
                return;
 
        intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
-       intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
        intel_gvt_cleanup_vgpu_type_groups(gvt);
        intel_gvt_clean_vgpu_types(gvt);
 
@@ -352,13 +306,6 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
        struct intel_vgpu *vgpu;
        int ret;
 
-       /*
-        * Cannot initialize GVT device without intel_gvt_host gets
-        * initialized first.
-        */
-       if (WARN_ON(!intel_gvt_host.initialized))
-               return -EINVAL;
-
        if (WARN_ON(dev_priv->gvt))
                return -EEXIST;
 
@@ -420,13 +367,6 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
                goto out_clean_types;
        }
 
-       ret = intel_gvt_hypervisor_host_init(&dev_priv->drm.pdev->dev, gvt,
-                               &intel_gvt_ops);
-       if (ret) {
-               gvt_err("failed to register gvt-g host device: %d\n", ret);
-               goto out_clean_types;
-       }
-
        vgpu = intel_gvt_create_idle_vgpu(gvt);
        if (IS_ERR(vgpu)) {
                ret = PTR_ERR(vgpu);
@@ -441,6 +381,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 
        gvt_dbg_core("gvt device initialization is done\n");
        dev_priv->gvt = gvt;
+       intel_gvt_host.dev = &dev_priv->drm.pdev->dev;
+       intel_gvt_host.initialized = true;
        return 0;
 
 out_clean_types:
@@ -467,6 +409,45 @@ out_clean_idr:
        return ret;
 }
 
-#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
-MODULE_SOFTDEP("pre: kvmgt");
-#endif
+int
+intel_gvt_register_hypervisor(struct intel_gvt_mpt *m)
+{
+       int ret;
+       void *gvt;
+
+       if (!intel_gvt_host.initialized)
+               return -ENODEV;
+
+       if (m->type != INTEL_GVT_HYPERVISOR_KVM &&
+           m->type != INTEL_GVT_HYPERVISOR_XEN)
+               return -EINVAL;
+
+       /* Get a reference for device model module */
+       if (!try_module_get(THIS_MODULE))
+               return -ENODEV;
+
+       intel_gvt_host.mpt = m;
+       intel_gvt_host.hypervisor_type = m->type;
+       gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt;
+
+       ret = intel_gvt_hypervisor_host_init(intel_gvt_host.dev, gvt,
+                                            &intel_gvt_ops);
+       if (ret < 0) {
+               gvt_err("Failed to init %s hypervisor module\n",
+                       supported_hypervisors[intel_gvt_host.hypervisor_type]);
+               module_put(THIS_MODULE);
+               return -ENODEV;
+       }
+       gvt_dbg_core("Running with hypervisor %s in host mode\n",
+                    supported_hypervisors[intel_gvt_host.hypervisor_type]);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(intel_gvt_register_hypervisor);
+
+void
+intel_gvt_unregister_hypervisor(void)
+{
+       intel_gvt_hypervisor_host_exit(intel_gvt_host.dev);
+       module_put(THIS_MODULE);
+}
+EXPORT_SYMBOL_GPL(intel_gvt_unregister_hypervisor);
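
The registration model is inverted here: instead of i915 probing for xengt/kvmgt at init, the MPT module now registers itself with its struct intel_gvt_mpt, typed through the new hypervisor_type enum. A hedged sketch of a module init/exit pair under the new API (kvmgt's real table and naming are out of view in this merge):

	static struct intel_gvt_mpt kvmgt_mpt = {
		.type = INTEL_GVT_HYPERVISOR_KVM,
		/* .host_init, .host_exit, .attach_vgpu, ... */
	};

	static int __init kvmgt_init(void)
	{
		return intel_gvt_register_hypervisor(&kvmgt_mpt);
	}

	static void __exit kvmgt_exit(void)
	{
		intel_gvt_unregister_hypervisor();
	}
	module_init(kvmgt_init);
	module_exit(kvmgt_exit);
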
index b4ab1dad01434f9ebdd6375104433a28c1707f2a..8bce09de4b822354a68c57c10caef5fc01266757 100644 (file)
 
 #define GVT_MAX_VGPU 8
 
-enum {
-       INTEL_GVT_HYPERVISOR_XEN = 0,
-       INTEL_GVT_HYPERVISOR_KVM,
-};
-
 struct intel_gvt_host {
+       struct device *dev;
        bool initialized;
        int hypervisor_type;
        struct intel_gvt_mpt *mpt;
@@ -540,6 +536,8 @@ int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
 int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes);
 
+void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected);
+
 static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
 {
        /* We are 64bit bar. */
@@ -581,6 +579,7 @@ struct intel_gvt_ops {
        int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
        int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
                                     unsigned int);
+       void (*emulate_hotplug)(struct intel_vgpu *vgpu, bool connected);
 };
 
 
@@ -597,7 +596,7 @@ static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
 
 static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
 {
-       intel_runtime_pm_put(dev_priv);
+       intel_runtime_pm_put_unchecked(dev_priv);
 }
 
 /**
index b5475c91e2ef1b337ed083aae4c67a1d94fbe2e9..9c106e47e640a5dd1a03f12ac2873262f35bd295 100644 (file)
@@ -57,6 +57,8 @@ unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
                return D_KBL;
        else if (IS_BROXTON(gvt->dev_priv))
                return D_BXT;
+       else if (IS_COFFEELAKE(gvt->dev_priv))
+               return D_CFL;
 
        return 0;
 }
@@ -276,14 +278,12 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
                unsigned int offset, void *p_data, unsigned int bytes)
 {
        u32 old, new;
-       uint32_t ack_reg_offset;
+       u32 ack_reg_offset;
 
        old = vgpu_vreg(vgpu, offset);
        new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
 
-       if (IS_SKYLAKE(vgpu->gvt->dev_priv)
-               || IS_KABYLAKE(vgpu->gvt->dev_priv)
-               || IS_BROXTON(vgpu->gvt->dev_priv)) {
+       if (INTEL_GEN(vgpu->gvt->dev_priv) >= 9) {
                switch (offset) {
                case FORCEWAKE_RENDER_GEN9_REG:
                        ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
@@ -833,7 +833,7 @@ static int dp_aux_ch_ctl_trans_done(struct intel_vgpu *vgpu, u32 value,
 }
 
 static void dp_aux_ch_ctl_link_training(struct intel_vgpu_dpcd_data *dpcd,
-               uint8_t t)
+               u8 t)
 {
        if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == DPCD_TRAINING_PATTERN_1) {
                /* training pattern 1 for CR */
@@ -889,9 +889,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
        write_vreg(vgpu, offset, p_data, bytes);
        data = vgpu_vreg(vgpu, offset);
 
-       if ((IS_SKYLAKE(vgpu->gvt->dev_priv)
-               || IS_KABYLAKE(vgpu->gvt->dev_priv)
-               || IS_BROXTON(vgpu->gvt->dev_priv))
+       if ((INTEL_GEN(vgpu->gvt->dev_priv) >= 9)
                && offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
                /* SKL DPB/C/D aux ctl register changed */
                return 0;
@@ -919,7 +917,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
 
        if (op == GVT_AUX_NATIVE_WRITE) {
                int t;
-               uint8_t buf[16];
+               u8 buf[16];
 
                if ((addr + len + 1) >= DPCD_SIZE) {
                        /*
@@ -1407,7 +1405,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
        switch (cmd) {
        case GEN9_PCODE_READ_MEM_LATENCY:
                if (IS_SKYLAKE(vgpu->gvt->dev_priv)
-                        || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
+                        || IS_KABYLAKE(vgpu->gvt->dev_priv)
+                        || IS_COFFEELAKE(vgpu->gvt->dev_priv)) {
                        /**
                         * "Read memory latency" command on gen9.
                         * Below memory latency values are read
@@ -1431,7 +1430,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
                break;
        case SKL_PCODE_CDCLK_CONTROL:
                if (IS_SKYLAKE(vgpu->gvt->dev_priv)
-                        || IS_KABYLAKE(vgpu->gvt->dev_priv))
+                        || IS_KABYLAKE(vgpu->gvt->dev_priv)
+                        || IS_COFFEELAKE(vgpu->gvt->dev_priv))
                        *data0 = SKL_CDCLK_READY_FOR_CHANGE;
                break;
        case GEN6_PCODE_READ_RC6VIDS:
@@ -3041,8 +3041,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
                 NULL, NULL);
 
-       MMIO_D(_MMIO(0x4ab8), D_KBL);
-       MMIO_D(_MMIO(0x2248), D_KBL | D_SKL);
+       MMIO_D(_MMIO(0x4ab8), D_KBL | D_CFL);
+       MMIO_D(_MMIO(0x2248), D_SKL_PLUS);
 
        return 0;
 }
@@ -3302,7 +3302,8 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
                if (ret)
                        goto err;
        } else if (IS_SKYLAKE(dev_priv)
-               || IS_KABYLAKE(dev_priv)) {
+               || IS_KABYLAKE(dev_priv)
+               || IS_COFFEELAKE(dev_priv)) {
                ret = init_broadwell_mmio_info(gvt);
                if (ret)
                        goto err;
index 5af11cf1b48235c46079f376686112c5234abbfd..5e01cc8d9b166a2992f5bd9b5b942065d380d9de 100644 (file)
 #ifndef _GVT_HYPERCALL_H_
 #define _GVT_HYPERCALL_H_
 
+enum hypervisor_type {
+       INTEL_GVT_HYPERVISOR_XEN = 0,
+       INTEL_GVT_HYPERVISOR_KVM,
+};
+
 /*
  * Specific GVT-g MPT modules function collections. Currently GVT-g supports
  * both Xen and KVM by providing dedicated hypervisor-related MPT modules.
  */
 struct intel_gvt_mpt {
+       enum hypervisor_type type;
        int (*host_init)(struct device *dev, void *gvt, const void *ops);
-       void (*host_exit)(struct device *dev, void *gvt);
+       void (*host_exit)(struct device *dev);
        int (*attach_vgpu)(void *vgpu, unsigned long *handle);
        void (*detach_vgpu)(unsigned long handle);
        int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
@@ -61,12 +67,12 @@ struct intel_gvt_mpt {
        int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
                             bool map);
        int (*set_opregion)(void *vgpu);
+       int (*set_edid)(void *vgpu, int port_num);
        int (*get_vfio_device)(void *vgpu);
        void (*put_vfio_device)(void *vgpu);
        bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn);
 };
 
 extern struct intel_gvt_mpt xengt_mpt;
-extern struct intel_gvt_mpt kvmgt_mpt;
 
 #endif /* _GVT_HYPERCALL_H_ */
index 6b9d1354ff29be770a68f75d1f5506070c45496d..67125c5eec6eb59c7502065ae2c4a3698b390822 100644 (file)
@@ -581,9 +581,7 @@ static void gen8_init_irq(
 
                SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
                SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
-       } else if (IS_SKYLAKE(gvt->dev_priv)
-                       || IS_KABYLAKE(gvt->dev_priv)
-                       || IS_BROXTON(gvt->dev_priv)) {
+       } else if (INTEL_GEN(gvt->dev_priv) >= 9) {
                SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
                SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
                SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
index c1072143da1dc3968767b94573383d3b06f25883..63eef86a2a85e28998ced72a1d516f23fbd69c02 100644 (file)
@@ -57,6 +57,8 @@ static const struct intel_gvt_ops *intel_gvt_ops;
 #define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
 #define VFIO_PCI_OFFSET_MASK    (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
 
+#define EDID_BLOB_OFFSET (PAGE_SIZE/2)
+
 #define OPREGION_SIGNATURE "IntelGraphicsMem"
 
 struct vfio_region;
@@ -76,6 +78,11 @@ struct vfio_region {
        void                            *data;
 };
 
+struct vfio_edid_region {
+       struct vfio_region_gfx_edid vfio_edid_regs;
+       void *edid_blob;
+};
+
 struct kvmgt_pgfn {
        gfn_t gfn;
        struct hlist_node hnode;
@@ -427,6 +434,111 @@ static const struct intel_vgpu_regops intel_vgpu_regops_opregion = {
        .release = intel_vgpu_reg_release_opregion,
 };
 
+static int handle_edid_regs(struct intel_vgpu *vgpu,
+                       struct vfio_edid_region *region, char *buf,
+                       size_t count, u16 offset, bool is_write)
+{
+       struct vfio_region_gfx_edid *regs = &region->vfio_edid_regs;
+       unsigned int data;
+
+       if (offset + count > sizeof(*regs))
+               return -EINVAL;
+
+       if (count != 4)
+               return -EINVAL;
+
+       if (is_write) {
+               data = *((unsigned int *)buf);
+               switch (offset) {
+               case offsetof(struct vfio_region_gfx_edid, link_state):
+                       if (data == VFIO_DEVICE_GFX_LINK_STATE_UP) {
+                               if (!drm_edid_block_valid(
+                                       (u8 *)region->edid_blob,
+                                       0,
+                                       true,
+                                       NULL)) {
+                                       gvt_vgpu_err("invalid EDID blob\n");
+                                       return -EINVAL;
+                               }
+                               intel_gvt_ops->emulate_hotplug(vgpu, true);
+                       } else if (data == VFIO_DEVICE_GFX_LINK_STATE_DOWN)
+                               intel_gvt_ops->emulate_hotplug(vgpu, false);
+                       else {
+                               gvt_vgpu_err("invalid EDID link state %d\n",
+                                       regs->link_state);
+                               return -EINVAL;
+                       }
+                       regs->link_state = data;
+                       break;
+               case offsetof(struct vfio_region_gfx_edid, edid_size):
+                       if (data > regs->edid_max_size) {
+                               gvt_vgpu_err("EDID size is bigger than %d!\n",
+                                       regs->edid_max_size);
+                               return -EINVAL;
+                       }
+                       regs->edid_size = data;
+                       break;
+               default:
+                       /* read-only regs */
+                       gvt_vgpu_err("write read-only EDID region at offset %d\n",
+                               offset);
+                       return -EPERM;
+               }
+       } else {
+               memcpy(buf, (char *)regs + offset, count);
+       }
+
+       return count;
+}
+
+static int handle_edid_blob(struct vfio_edid_region *region, char *buf,
+                       size_t count, u16 offset, bool is_write)
+{
+       if (offset + count > region->vfio_edid_regs.edid_size)
+               return -EINVAL;
+
+       if (is_write)
+               memcpy(region->edid_blob + offset, buf, count);
+       else
+               memcpy(buf, region->edid_blob + offset, count);
+
+       return count;
+}
+
+static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf,
+               size_t count, loff_t *ppos, bool iswrite)
+{
+       int ret;
+       unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
+                       VFIO_PCI_NUM_REGIONS;
+       struct vfio_edid_region *region =
+               (struct vfio_edid_region *)vgpu->vdev.region[i].data;
+       loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+
+       if (pos < region->vfio_edid_regs.edid_offset) {
+               ret = handle_edid_regs(vgpu, region, buf, count, pos, iswrite);
+       } else {
+               pos -= EDID_BLOB_OFFSET;
+               ret = handle_edid_blob(region, buf, count, pos, iswrite);
+       }
+
+       if (ret < 0)
+               gvt_vgpu_err("failed to access EDID region\n");
+
+       return ret;
+}
+
+static void intel_vgpu_reg_release_edid(struct intel_vgpu *vgpu,
+                                       struct vfio_region *region)
+{
+       kfree(region->data);
+}
+
+static const struct intel_vgpu_regops intel_vgpu_regops_edid = {
+       .rw = intel_vgpu_reg_rw_edid,
+       .release = intel_vgpu_reg_release_edid,
+};
+
 static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
                unsigned int type, unsigned int subtype,
                const struct intel_vgpu_regops *ops,
@@ -493,6 +605,36 @@ static int kvmgt_set_opregion(void *p_vgpu)
        return ret;
 }
 
+static int kvmgt_set_edid(void *p_vgpu, int port_num)
+{
+       struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+       struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
+       struct vfio_edid_region *base;
+       int ret;
+
+       base = kzalloc(sizeof(*base), GFP_KERNEL);
+       if (!base)
+               return -ENOMEM;
+
+       /* TODO: Add multi-port and EDID extension block support */
+       base->vfio_edid_regs.edid_offset = EDID_BLOB_OFFSET;
+       base->vfio_edid_regs.edid_max_size = EDID_SIZE;
+       base->vfio_edid_regs.edid_size = EDID_SIZE;
+       base->vfio_edid_regs.max_xres = vgpu_edid_xres(port->id);
+       base->vfio_edid_regs.max_yres = vgpu_edid_yres(port->id);
+       base->edid_blob = port->edid->edid_block;
+
+       ret = intel_vgpu_register_reg(vgpu,
+                       VFIO_REGION_TYPE_GFX,
+                       VFIO_REGION_SUBTYPE_GFX_EDID,
+                       &intel_vgpu_regops_edid, EDID_SIZE,
+                       VFIO_REGION_INFO_FLAG_READ |
+                       VFIO_REGION_INFO_FLAG_WRITE |
+                       VFIO_REGION_INFO_FLAG_CAPS, base);
+
+       return ret;
+}
+
 static void kvmgt_put_vfio_device(void *vgpu)
 {
        if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device))
@@ -627,6 +769,12 @@ static int intel_vgpu_open(struct mdev_device *mdev)
                goto undo_iommu;
        }
 
+       /*
+        * Take a module reference, as the mdev core does not take one
+        * on behalf of the vendor driver.
+        */
+       if (!try_module_get(THIS_MODULE)) {
+               ret = -ENODEV;
+               goto undo_group;
+       }
+
        ret = kvmgt_guest_init(mdev);
        if (ret)
                goto undo_group;
@@ -679,6 +827,9 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
                                        &vgpu->vdev.group_notifier);
        WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);
 
+       /* drop the module reference taken at open */
+       module_put(THIS_MODULE);
+
        info = (struct kvmgt_guest_info *)vgpu->handle;
        kvmgt_guest_exit(info);
 
@@ -703,7 +854,7 @@ static void intel_vgpu_release_work(struct work_struct *work)
        __intel_vgpu_release(vgpu);
 }
 
-static uint64_t intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
+static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
 {
        u32 start_lo, start_hi;
        u32 mem_type;
@@ -730,10 +881,10 @@ static uint64_t intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
        return ((u64)start_hi << 32) | start_lo;
 }
 
-static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
+static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, u64 off,
                             void *buf, unsigned int count, bool is_write)
 {
-       uint64_t bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
+       u64 bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
        int ret;
 
        if (is_write)
@@ -745,13 +896,13 @@ static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
        return ret;
 }
 
-static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, uint64_t off)
+static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off)
 {
        return off >= vgpu_aperture_offset(vgpu) &&
               off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
 }
 
-static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t off,
+static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
                void *buf, unsigned long count, bool is_write)
 {
        void *aperture_va;
@@ -783,7 +934,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
 {
        struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
        unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
-       uint64_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+       u64 pos = *ppos & VFIO_PCI_OFFSET_MASK;
        int ret = -EINVAL;
 
 
@@ -1029,7 +1180,7 @@ static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
 
 static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
                        unsigned int index, unsigned int start,
-                       unsigned int count, uint32_t flags,
+                       unsigned int count, u32 flags,
                        void *data)
 {
        return 0;
@@ -1037,21 +1188,21 @@ static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
 
 static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
                        unsigned int index, unsigned int start,
-                       unsigned int count, uint32_t flags, void *data)
+                       unsigned int count, u32 flags, void *data)
 {
        return 0;
 }
 
 static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
                unsigned int index, unsigned int start, unsigned int count,
-               uint32_t flags, void *data)
+               u32 flags, void *data)
 {
        return 0;
 }
 
 static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
                unsigned int index, unsigned int start, unsigned int count,
-               uint32_t flags, void *data)
+               u32 flags, void *data)
 {
        struct eventfd_ctx *trigger;
 
@@ -1070,12 +1221,12 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
        return 0;
 }
 
-static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, uint32_t flags,
+static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, u32 flags,
                unsigned int index, unsigned int start, unsigned int count,
                void *data)
 {
        int (*func)(struct intel_vgpu *vgpu, unsigned int index,
-                       unsigned int start, unsigned int count, uint32_t flags,
+                       unsigned int start, unsigned int count, u32 flags,
                        void *data) = NULL;
 
        switch (index) {
@@ -1467,7 +1618,7 @@ static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
        return mdev_register_device(dev, &intel_vgpu_ops);
 }
 
-static void kvmgt_host_exit(struct device *dev, void *gvt)
+static void kvmgt_host_exit(struct device *dev)
 {
        mdev_unregister_device(dev);
 }
@@ -1849,7 +2000,8 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
        return ret;
 }
 
-struct intel_gvt_mpt kvmgt_mpt = {
+static struct intel_gvt_mpt kvmgt_mpt = {
+       .type = INTEL_GVT_HYPERVISOR_KVM,
        .host_init = kvmgt_host_init,
        .host_exit = kvmgt_host_exit,
        .attach_vgpu = kvmgt_attach_vgpu,
@@ -1864,19 +2016,22 @@ struct intel_gvt_mpt kvmgt_mpt = {
        .dma_map_guest_page = kvmgt_dma_map_guest_page,
        .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
        .set_opregion = kvmgt_set_opregion,
+       .set_edid = kvmgt_set_edid,
        .get_vfio_device = kvmgt_get_vfio_device,
        .put_vfio_device = kvmgt_put_vfio_device,
        .is_valid_gfn = kvmgt_is_valid_gfn,
 };
-EXPORT_SYMBOL_GPL(kvmgt_mpt);
 
 static int __init kvmgt_init(void)
 {
+       if (intel_gvt_register_hypervisor(&kvmgt_mpt) < 0)
+               return -ENODEV;
        return 0;
 }
 
 static void __exit kvmgt_exit(void)
 {
+       intel_gvt_unregister_hypervisor();
 }
 
 module_init(kvmgt_init);
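
For illustration, a minimal userspace sketch of driving the new EDID region from a VFIO client. It assumes the client has already resolved the region's file offset via VFIO_DEVICE_GET_REGION_INFO; the function name set_vgpu_edid and the fd/region_offset parameters are hypothetical, while the struct and link-state constants come from the uapi <linux/vfio.h>:

#include <linux/vfio.h>
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>

/* Program a new EDID blob and bring the virtual link up. The size is
 * declared before the blob is written so the blob write passes the
 * edid_size bounds check in handle_edid_blob(). */
static int set_vgpu_edid(int fd, off_t region_offset,
			 const uint8_t *edid, uint32_t size)
{
	uint32_t state = VFIO_DEVICE_GFX_LINK_STATE_DOWN;
	struct vfio_region_gfx_edid regs;

	/* Reads fall through to the memcpy() path in handle_edid_regs(). */
	if (pread(fd, &regs, sizeof(regs), region_offset) != sizeof(regs))
		return -1;
	if (size > regs.edid_max_size)
		return -1;

	/* Take the link down while the blob is being replaced. */
	if (pwrite(fd, &state, sizeof(state), region_offset +
		   offsetof(struct vfio_region_gfx_edid, link_state)) != sizeof(state))
		return -1;

	if (pwrite(fd, &size, sizeof(size), region_offset +
		   offsetof(struct vfio_region_gfx_edid, edid_size)) != sizeof(size))
		return -1;
	if (pwrite(fd, edid, size,
		   region_offset + regs.edid_offset) != (ssize_t)size)
		return -1;

	/* Link up: kvmgt validates the blob and emulates a hotplug. */
	state = VFIO_DEVICE_GFX_LINK_STATE_UP;
	return pwrite(fd, &state, sizeof(state), region_offset +
		      offsetof(struct vfio_region_gfx_edid, link_state))
		== sizeof(state) ? 0 : -1;
}
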
index 43f65848ecd6b8e4e2436d12a23f31c2bea692c9..ed4df2f6d60b6fe233b912917499505d52642d0e 100644 (file)
@@ -57,7 +57,7 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
        (reg >= gvt->device_info.gtt_start_offset \
         && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
 
-static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
+static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, u64 pa,
                void *p_data, unsigned int bytes, bool read)
 {
        struct intel_gvt *gvt = NULL;
@@ -99,7 +99,7 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
  * Returns:
  * Zero on success, negative error code if failed
  */
-int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
+int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
                void *p_data, unsigned int bytes)
 {
        struct intel_gvt *gvt = vgpu->gvt;
@@ -171,7 +171,7 @@ out:
  * Returns:
  * Zero on success, negative error code if failed
  */
-int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
+int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
                void *p_data, unsigned int bytes)
 {
        struct intel_gvt *gvt = vgpu->gvt;
index 1ffc69eba30e385a21469847c6681f420254d67d..5874f1cb43062cdf39f2818c4b6340ec16c4bb03 100644 (file)
@@ -43,15 +43,16 @@ struct intel_vgpu;
 #define D_SKL  (1 << 1)
 #define D_KBL  (1 << 2)
 #define D_BXT  (1 << 3)
+#define D_CFL  (1 << 4)
 
-#define D_GEN9PLUS     (D_SKL | D_KBL | D_BXT)
-#define D_GEN8PLUS     (D_BDW | D_SKL | D_KBL | D_BXT)
+#define D_GEN9PLUS     (D_SKL | D_KBL | D_BXT | D_CFL)
+#define D_GEN8PLUS     (D_BDW | D_SKL | D_KBL | D_BXT | D_CFL)
 
-#define D_SKL_PLUS     (D_SKL | D_KBL | D_BXT)
-#define D_BDW_PLUS     (D_BDW | D_SKL | D_KBL | D_BXT)
+#define D_SKL_PLUS     (D_SKL | D_KBL | D_BXT | D_CFL)
+#define D_BDW_PLUS     (D_BDW | D_SKL | D_KBL | D_BXT | D_CFL)
 
 #define D_PRE_SKL      (D_BDW)
-#define D_ALL          (D_BDW | D_SKL | D_KBL | D_BXT)
+#define D_ALL          (D_BDW | D_SKL | D_KBL | D_BXT | D_CFL)
 
 typedef int (*gvt_mmio_func)(struct intel_vgpu *, unsigned int, void *,
                             unsigned int);
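
The new D_CFL bit folds into every *_PLUS mask above, so an illustrative check like the one below (not a GVT API; the names are made up) shows why a handler registered for, say, D_SKL_PLUS now also matches Coffeelake without touching the handler tables:

/* Each MMIO handler carries a device mask; it applies when the running
 * platform's bit is present in that mask. */
static inline bool handler_applies(unsigned int handler_mask,
				   unsigned int cur_device)
{
	return (handler_mask & cur_device) != 0;
}

/* After this patch: handler_applies(D_SKL_PLUS, D_CFL) is true. */
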
index d6e02c15ef97d995fd4ec2ab203c15e5ff8ab245..7d84cfb9051ac886579648ac7bb2cc5e2a70b3fa 100644 (file)
@@ -353,8 +353,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
         */
        fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
                                            FW_REG_READ | FW_REG_WRITE);
-       if (ring_id == RCS && (IS_SKYLAKE(dev_priv) ||
-                       IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)))
+       if (ring_id == RCS && (INTEL_GEN(dev_priv) >= 9))
                fw |= FORCEWAKE_RENDER;
 
        intel_uncore_forcewake_get(dev_priv, fw);
@@ -391,7 +390,8 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
        if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
                return;
 
-       if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)) && ring_id == RCS)
+       if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)
+               || IS_COFFEELAKE(dev_priv)) && ring_id == RCS)
                return;
 
        if (!pre && !gen9_render_mocs.initialized)
@@ -457,9 +457,7 @@ static void switch_mmio(struct intel_vgpu *pre,
        u32 old_v, new_v;
 
        dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
-       if (IS_SKYLAKE(dev_priv)
-               || IS_KABYLAKE(dev_priv)
-               || IS_BROXTON(dev_priv))
+       if (INTEL_GEN(dev_priv) >= 9)
                switch_mocs(pre, next, ring_id);
 
        for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
@@ -471,8 +469,8 @@ static void switch_mmio(struct intel_vgpu *pre,
                 * state image on Kabylake; it is initialized by an LRI command
                 * and saved/restored together with the context.
                 */
-               if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv))
-                       && mmio->in_context)
+               if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)
+                       || IS_COFFEELAKE(dev_priv)) && mmio->in_context)
                        continue;
 
                /* save */
@@ -565,9 +563,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
 {
        struct engine_mmio *mmio;
 
-       if (IS_SKYLAKE(gvt->dev_priv) ||
-               IS_KABYLAKE(gvt->dev_priv) ||
-               IS_BROXTON(gvt->dev_priv))
+       if (INTEL_GEN(gvt->dev_priv) >= 9)
                gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
        else
                gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;
index 67f19992b226f29a13d408be36da8b4820095b13..5d8b8f228d8f29c6834e27e9d6887ddf7e60a951 100644 (file)
  * Zero on success, negative error code if failed
  */
 static inline int intel_gvt_hypervisor_host_init(struct device *dev,
-                       void *gvt, const void *ops)
+                                                void *gvt, const void *ops)
 {
-       /* optional to provide */
        if (!intel_gvt_host.mpt->host_init)
-               return 0;
+               return -ENODEV;
 
        return intel_gvt_host.mpt->host_init(dev, gvt, ops);
 }
@@ -62,14 +61,13 @@ static inline int intel_gvt_hypervisor_host_init(struct device *dev,
 /**
  * intel_gvt_hypervisor_host_exit - exit GVT-g host side
  */
-static inline void intel_gvt_hypervisor_host_exit(struct device *dev,
-                       void *gvt)
+static inline void intel_gvt_hypervisor_host_exit(struct device *dev)
 {
        /* optional to provide */
        if (!intel_gvt_host.mpt->host_exit)
                return;
 
-       intel_gvt_host.mpt->host_exit(dev, gvt);
+       intel_gvt_host.mpt->host_exit(dev);
 }
 
 /**
@@ -315,6 +313,23 @@ static inline int intel_gvt_hypervisor_set_opregion(struct intel_vgpu *vgpu)
        return intel_gvt_host.mpt->set_opregion(vgpu);
 }
 
+/**
+ * intel_gvt_hypervisor_set_edid - Set EDID region for guest
+ * @vgpu: a vGPU
+ * @port_num: display port number
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_set_edid(struct intel_vgpu *vgpu,
+                                               int port_num)
+{
+       if (!intel_gvt_host.mpt->set_edid)
+               return 0;
+
+       return intel_gvt_host.mpt->set_edid(vgpu, port_num);
+}
+
 /**
  * intel_gvt_hypervisor_get_vfio_device - increase vfio device ref count
  * @vgpu: a vGPU
@@ -362,4 +377,7 @@ static inline bool intel_gvt_hypervisor_is_valid_gfn(
        return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn);
 }
 
+int intel_gvt_register_hypervisor(struct intel_gvt_mpt *);
+void intel_gvt_unregister_hypervisor(void);
+
 #endif /* _GVT_MPT_H_ */
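
The registration pair is only declared here; as a rough sketch of what the i915 side might do with it (fields on intel_gvt_host beyond .mpt are assumptions, and the real body may differ):

/* Record the single active hypervisor backend; kvmgt_init() fails with
 * -ENODEV when this returns a negative value. */
int intel_gvt_register_hypervisor(struct intel_gvt_mpt *m)
{
	if (intel_gvt_host.mpt)
		return -EBUSY;	/* one backend at a time */

	intel_gvt_host.mpt = m;
	intel_gvt_host.hypervisor_type = m->type;	/* assumed field */
	return 0;
}

void intel_gvt_unregister_hypervisor(void)
{
	intel_gvt_host.mpt = NULL;
}
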
index c32e7d5e862914b787d7cf5cbf706cb5c4a41748..1c763a27a41219614080e7a7101c2bb3de56fdf4 100644 (file)
@@ -94,7 +94,7 @@ static void gvt_balance_timeslice(struct gvt_sched_data *sched_data)
 {
        struct vgpu_sched_data *vgpu_data;
        struct list_head *pos;
-       static uint64_t stage_check;
+       static u64 stage_check;
        int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM;
 
        /* The timeslice accumulation reset at stage 0, which is
@@ -474,6 +474,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
                }
        }
        spin_unlock_bh(&scheduler->mmio_context_lock);
-       intel_runtime_pm_put(dev_priv);
+       intel_runtime_pm_put_unchecked(dev_priv);
        mutex_unlock(&vgpu->gvt->sched_lock);
 }
index 1ad8c5e1455d782160d15c4a1c83cb6f64dad3cf..b7957eefb9763b5f310ad81c141136ce7911add8 100644 (file)
@@ -299,7 +299,8 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
        void *shadow_ring_buffer_va;
        u32 *cs;
 
-       if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915))
+       if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915)
+               || IS_COFFEELAKE(req->i915))
                && is_inhibit_context(req->hw_context))
                intel_vgpu_restore_inhibit_context(vgpu, req);
 
@@ -939,9 +940,7 @@ static int workload_thread(void *priv)
        struct intel_vgpu_workload *workload = NULL;
        struct intel_vgpu *vgpu = NULL;
        int ret;
-       bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
-                       || IS_KABYLAKE(gvt->dev_priv)
-                       || IS_BROXTON(gvt->dev_priv);
+       bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9);
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
        kfree(p);
@@ -997,7 +996,7 @@ complete:
                        intel_uncore_forcewake_put(gvt->dev_priv,
                                        FORCEWAKE_ALL);
 
-               intel_runtime_pm_put(gvt->dev_priv);
+               intel_runtime_pm_put_unchecked(gvt->dev_priv);
                if (ret && (vgpu_is_vm_unhealthy(ret)))
                        enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
        }
@@ -1451,7 +1450,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
                mutex_lock(&dev_priv->drm.struct_mutex);
                ret = intel_gvt_scan_and_shadow_workload(workload);
                mutex_unlock(&dev_priv->drm.struct_mutex);
-               intel_runtime_pm_put(dev_priv);
+               intel_runtime_pm_put_unchecked(dev_priv);
        }
 
        if (ret && (vgpu_is_vm_unhealthy(ret))) {
index ca5529d0e48ef20b0d2dbdd6e33512a2234afda7..1e9eec6a32fee8fea81e365ff030af0485c6a477 100644 (file)
@@ -61,7 +61,7 @@ struct shadow_indirect_ctx {
        unsigned long guest_gma;
        unsigned long shadow_gma;
        void *shadow_va;
-       uint32_t size;
+       u32 size;
 };
 
 #define PER_CTX_ADDR_MASK 0xfffff000
index 1fd64202d74e740c684fa4637386030057500054..6d787750d279f63ed3f08e06d01a18401a52367e 100644 (file)
@@ -228,7 +228,7 @@ TRACE_EVENT(oos_sync,
 TRACE_EVENT(gvt_command,
        TP_PROTO(u8 vgpu_id, u8 ring_id, u32 ip_gma, u32 *cmd_va,
                u32 cmd_len,  u32 buf_type, u32 buf_addr_type,
-               void *workload, char *cmd_name),
+               void *workload, const char *cmd_name),
 
        TP_ARGS(vgpu_id, ring_id, ip_gma, cmd_va, cmd_len, buf_type,
                buf_addr_type, workload, cmd_name),
index c628be05fbfe907a1bce89fd4727df79906fe63f..720e2b10adaa10f8ccc4c7472da4e300eb4b296f 100644 (file)
@@ -148,10 +148,10 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
                gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
                                                   high_avail / vgpu_types[i].high_mm);
 
-               if (IS_GEN8(gvt->dev_priv))
+               if (IS_GEN(gvt->dev_priv, 8))
                        sprintf(gvt->types[i].name, "GVTg_V4_%s",
                                                vgpu_types[i].name);
-               else if (IS_GEN9(gvt->dev_priv))
+               else if (IS_GEN(gvt->dev_priv, 9))
                        sprintf(gvt->types[i].name, "GVTg_V5_%s",
                                                vgpu_types[i].name);
 
@@ -428,6 +428,12 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
        if (ret)
                goto out_clean_sched_policy;
 
+       /* TODO: add support for more platforms */
+       if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv)) {
+               ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
+               if (ret)
+                       goto out_clean_sched_policy;
+       }
+
        return vgpu;
 
 out_clean_sched_policy:
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
new file mode 100644 (file)
index 0000000..215b6ff
--- /dev/null
@@ -0,0 +1,286 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_active.h"
+
+#define BKL(ref) (&(ref)->i915->drm.struct_mutex)
+
+/*
+ * Active refs memory management
+ *
+ * To be more economical with memory, we reap all the i915_active trees as
+ * they idle (once we know all tracked requests have been retired) and
+ * allocate the nodes from a local slab cache to hopefully reduce the
+ * fragmentation.
+ */
+static struct i915_global_active {
+       struct kmem_cache *slab_cache;
+} global;
+
+struct active_node {
+       struct i915_active_request base;
+       struct i915_active *ref;
+       struct rb_node node;
+       u64 timeline;
+};
+
+static void
+__active_park(struct i915_active *ref)
+{
+       struct active_node *it, *n;
+
+       rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
+               GEM_BUG_ON(i915_active_request_isset(&it->base));
+               kmem_cache_free(global.slab_cache, it);
+       }
+       ref->tree = RB_ROOT;
+}
+
+static void
+__active_retire(struct i915_active *ref)
+{
+       GEM_BUG_ON(!ref->count);
+       if (--ref->count)
+               return;
+
+       /* return the unused nodes to our slabcache */
+       __active_park(ref);
+
+       ref->retire(ref);
+}
+
+static void
+node_retire(struct i915_active_request *base, struct i915_request *rq)
+{
+       __active_retire(container_of(base, struct active_node, base)->ref);
+}
+
+static void
+last_retire(struct i915_active_request *base, struct i915_request *rq)
+{
+       __active_retire(container_of(base, struct i915_active, last));
+}
+
+static struct i915_active_request *
+active_instance(struct i915_active *ref, u64 idx)
+{
+       struct active_node *node;
+       struct rb_node **p, *parent;
+       struct i915_request *old;
+
+       /*
+        * We track the most recently used timeline to skip a rbtree search
+        * for the common case, under typical loads we never need the rbtree
+        * at all. We can reuse the last slot if it is empty, that is
+        * after the previous activity has been retired, or if it matches the
+        * current timeline.
+        *
+        * Note that we allow the timeline to be active simultaneously in
+        * the rbtree and the last cache. We do this to avoid having
+        * to search and replace the rbtree element for a new timeline, with
+        * the cost being that we must be aware that the ref may be retired
+        * twice for the same timeline (as the older rbtree element will be
+        * retired before the new request added to last).
+        */
+       old = i915_active_request_raw(&ref->last, BKL(ref));
+       if (!old || old->fence.context == idx)
+               goto out;
+
+       /* Move the currently active fence into the rbtree */
+       idx = old->fence.context;
+
+       parent = NULL;
+       p = &ref->tree.rb_node;
+       while (*p) {
+               parent = *p;
+
+               node = rb_entry(parent, struct active_node, node);
+               if (node->timeline == idx)
+                       goto replace;
+
+               if (node->timeline < idx)
+                       p = &parent->rb_right;
+               else
+                       p = &parent->rb_left;
+       }
+
+       node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
+
+       /* kmalloc may retire the ref->last (thanks shrinker)! */
+       if (unlikely(!i915_active_request_raw(&ref->last, BKL(ref)))) {
+               kmem_cache_free(global.slab_cache, node);
+               goto out;
+       }
+
+       if (unlikely(!node))
+               return ERR_PTR(-ENOMEM);
+
+       i915_active_request_init(&node->base, NULL, node_retire);
+       node->ref = ref;
+       node->timeline = idx;
+
+       rb_link_node(&node->node, parent, p);
+       rb_insert_color(&node->node, &ref->tree);
+
+replace:
+       /*
+        * Overwrite the previous active slot in the rbtree with last,
+        * leaving last zeroed. If the previous slot is still active,
+        * we must be careful as we now only expect to receive one retire
+        * callback, not two, and so must undo the active counting for the
+        * overwritten slot.
+        */
+       if (i915_active_request_isset(&node->base)) {
+               /* Retire ourselves from the old rq->active_list */
+               __list_del_entry(&node->base.link);
+               ref->count--;
+               GEM_BUG_ON(!ref->count);
+       }
+       GEM_BUG_ON(list_empty(&ref->last.link));
+       list_replace_init(&ref->last.link, &node->base.link);
+       node->base.request = fetch_and_zero(&ref->last.request);
+
+out:
+       return &ref->last;
+}
+
+void i915_active_init(struct drm_i915_private *i915,
+                     struct i915_active *ref,
+                     void (*retire)(struct i915_active *ref))
+{
+       ref->i915 = i915;
+       ref->retire = retire;
+       ref->tree = RB_ROOT;
+       i915_active_request_init(&ref->last, NULL, last_retire);
+       ref->count = 0;
+}
+
+int i915_active_ref(struct i915_active *ref,
+                   u64 timeline,
+                   struct i915_request *rq)
+{
+       struct i915_active_request *active;
+
+       active = active_instance(ref, timeline);
+       if (IS_ERR(active))
+               return PTR_ERR(active);
+
+       if (!i915_active_request_isset(active))
+               ref->count++;
+       __i915_active_request_set(active, rq);
+
+       GEM_BUG_ON(!ref->count);
+       return 0;
+}
+
+bool i915_active_acquire(struct i915_active *ref)
+{
+       lockdep_assert_held(BKL(ref));
+       return !ref->count++;
+}
+
+void i915_active_release(struct i915_active *ref)
+{
+       lockdep_assert_held(BKL(ref));
+       __active_retire(ref);
+}
+
+int i915_active_wait(struct i915_active *ref)
+{
+       struct active_node *it, *n;
+       int ret = 0;
+
+       if (i915_active_acquire(ref))
+               goto out_release;
+
+       ret = i915_active_request_retire(&ref->last, BKL(ref));
+       if (ret)
+               goto out_release;
+
+       rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
+               ret = i915_active_request_retire(&it->base, BKL(ref));
+               if (ret)
+                       break;
+       }
+
+out_release:
+       i915_active_release(ref);
+       return ret;
+}
+
+int i915_request_await_active_request(struct i915_request *rq,
+                                     struct i915_active_request *active)
+{
+       struct i915_request *barrier =
+               i915_active_request_raw(active, &rq->i915->drm.struct_mutex);
+
+       return barrier ? i915_request_await_dma_fence(rq, &barrier->fence) : 0;
+}
+
+int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
+{
+       struct active_node *it, *n;
+       int ret;
+
+       ret = i915_request_await_active_request(rq, &ref->last);
+       if (ret)
+               return ret;
+
+       rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
+               ret = i915_request_await_active_request(rq, &it->base);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+void i915_active_fini(struct i915_active *ref)
+{
+       GEM_BUG_ON(i915_active_request_isset(&ref->last));
+       GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
+       GEM_BUG_ON(ref->count);
+}
+#endif
+
+int i915_active_request_set(struct i915_active_request *active,
+                           struct i915_request *rq)
+{
+       int err;
+
+       /* Must maintain ordering wrt previous active requests */
+       err = i915_request_await_active_request(rq, active);
+       if (err)
+               return err;
+
+       __i915_active_request_set(active, rq);
+       return 0;
+}
+
+void i915_active_retire_noop(struct i915_active_request *active,
+                            struct i915_request *request)
+{
+       /* Space left intentionally blank */
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/i915_active.c"
+#endif
+
+int __init i915_global_active_init(void)
+{
+       global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
+       if (!global.slab_cache)
+               return -ENOMEM;
+
+       return 0;
+}
+
+void __exit i915_global_active_exit(void)
+{
+       kmem_cache_destroy(global.slab_cache);
+}
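
A hypothetical user of the tracker, to make the retire flow concrete: a resource embeds an i915_active, registers every request that touches it, and frees itself from the retire callback once all timelines idle. struct my_res and its helpers are illustrative only (real users also pair i915_active_acquire()/i915_active_release() to keep a tracker alive while idle):

struct my_res {
	struct i915_active active;
	/* ...payload... */
};

static void my_res_retire(struct i915_active *ref)
{
	/* Runs once every tracked timeline has retired its request. */
	kfree(container_of(ref, struct my_res, active));
}

static struct my_res *my_res_create(struct drm_i915_private *i915)
{
	struct my_res *res;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return NULL;

	i915_active_init(i915, &res->active, my_res_retire);
	return res;
}

/* Under struct_mutex: track @rq on its own timeline; the resource
 * stays alive until every tracked request has retired. */
static int my_res_use(struct my_res *res, struct i915_request *rq)
{
	return i915_active_ref(&res->active, rq->fence.context, rq);
}
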
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
new file mode 100644 (file)
index 0000000..12b5c1d
--- /dev/null
@@ -0,0 +1,425 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef _I915_ACTIVE_H_
+#define _I915_ACTIVE_H_
+
+#include <linux/lockdep.h>
+
+#include "i915_active_types.h"
+#include "i915_request.h"
+
+/*
+ * We treat requests as fences. This is not to be confused with our
+ * "fence registers", but with pipeline synchronisation objects a la
+ * GL_ARB_sync. We use the fences to synchronize access from the CPU
+ * with activity on the GPU; for example, we should not rewrite an
+ * object's PTEs whilst the GPU is reading them. We also track fences
+ * at a higher level to provide
+ * implicit synchronisation around GEM objects, e.g. set-domain will wait
+ * for outstanding GPU rendering before marking the object ready for CPU
+ * access, or a pageflip will wait until the GPU is complete before showing
+ * the frame on the scanout.
+ *
+ * In order to use a fence, the object must track the fence it needs to
+ * serialise with. For example, GEM objects want to track both read and
+ * write access so that we can perform concurrent read operations between
+ * the CPU and GPU engines, as well as waiting for all rendering to
+ * complete, or waiting for the last GPU user of a "fence register". The
+ * object then embeds a #i915_active_request to track the most recent (in
+ * retirement order) request relevant for the desired mode of access.
+ * The #i915_active_request is updated with i915_active_request_set() to
+ * track the most recent fence request, typically this is done as part of
+ * i915_vma_move_to_active().
+ *
+ * When the #i915_active_request completes (is retired), it will
+ * signal its completion to the owner through a callback as well as mark
+ * itself as idle (i915_active_request.request == NULL). The owner
+ * can then perform any action, such as delayed freeing of an active
+ * resource including itself.
+ */
+
+void i915_active_retire_noop(struct i915_active_request *active,
+                            struct i915_request *request);
+
+/**
+ * i915_active_request_init - prepares the activity tracker for use
+ * @active - the active tracker
+ * @rq - initial request to track, can be NULL
+ * @retire - a callback invoked when the tracker is retired (becomes idle),
+ *           can be NULL
+ *
+ * i915_active_request_init() prepares the embedded @active struct for use as
+ * an activity tracker, that is for tracking the last known active request
+ * associated with it. When the last request becomes idle, when it is retired
+ * after completion, the optional callback @func is invoked.
+ */
+static inline void
+i915_active_request_init(struct i915_active_request *active,
+                        struct i915_request *rq,
+                        i915_active_retire_fn retire)
+{
+       RCU_INIT_POINTER(active->request, rq);
+       INIT_LIST_HEAD(&active->link);
+       active->retire = retire ?: i915_active_retire_noop;
+}
+
+#define INIT_ACTIVE_REQUEST(name) i915_active_request_init((name), NULL, NULL)
+
+/**
+ * __i915_active_request_set - updates the tracker to watch the current request
+ * @active - the active tracker
+ * @request - the request to watch
+ *
+ * __i915_active_request_set() watches the given @request for completion. Whilst
+ * that @request is busy, the @active reports busy. When that @request is
+ * retired, the @active tracker is updated to report idle.
+ */
+static inline void
+__i915_active_request_set(struct i915_active_request *active,
+                         struct i915_request *request)
+{
+       list_move(&active->link, &request->active_list);
+       rcu_assign_pointer(active->request, request);
+}
+
+int __must_check
+i915_active_request_set(struct i915_active_request *active,
+                       struct i915_request *rq);
+
+/**
+ * i915_active_request_set_retire_fn - updates the retirement callback
+ * @active - the active tracker
+ * @fn - the routine called when the request is retired
+ * @mutex - struct_mutex used to guard retirements
+ *
+ * i915_active_request_set_retire_fn() updates the function pointer that
+ * is called when the final request associated with the @active tracker
+ * is retired.
+ */
+static inline void
+i915_active_request_set_retire_fn(struct i915_active_request *active,
+                                 i915_active_retire_fn fn,
+                                 struct mutex *mutex)
+{
+       lockdep_assert_held(mutex);
+       active->retire = fn ?: i915_active_retire_noop;
+}
+
+static inline struct i915_request *
+__i915_active_request_peek(const struct i915_active_request *active)
+{
+       /*
+        * Inside the error capture (running with the driver in an unknown
+        * state), we want to bend the rules slightly (a lot).
+        *
+        * Work is in progress to make it safer, in the meantime this keeps
+        * the known issue from spamming the logs.
+        */
+       return rcu_dereference_protected(active->request, 1);
+}
+
+/**
+ * i915_active_request_raw - return the active request
+ * @active - the active tracker
+ *
+ * i915_active_request_raw() returns the current request being tracked, or NULL.
+ * It does not obtain a reference on the request for the caller, so the caller
+ * must hold struct_mutex.
+ */
+static inline struct i915_request *
+i915_active_request_raw(const struct i915_active_request *active,
+                       struct mutex *mutex)
+{
+       return rcu_dereference_protected(active->request,
+                                        lockdep_is_held(mutex));
+}
+
+/**
+ * i915_active_request_peek - report the active request being monitored
+ * @active - the active tracker
+ *
+ * i915_active_request_peek() returns the current request being tracked if
+ * still active, or NULL. It does not obtain a reference on the request
+ * for the caller, so the caller must hold struct_mutex.
+ */
+static inline struct i915_request *
+i915_active_request_peek(const struct i915_active_request *active,
+                        struct mutex *mutex)
+{
+       struct i915_request *request;
+
+       request = i915_active_request_raw(active, mutex);
+       if (!request || i915_request_completed(request))
+               return NULL;
+
+       return request;
+}
+
+/**
+ * i915_active_request_get - return a reference to the active request
+ * @active - the active tracker
+ *
+ * i915_active_request_get() returns a reference to the active request, or NULL
+ * if the active tracker is idle. The caller must hold struct_mutex.
+ */
+static inline struct i915_request *
+i915_active_request_get(const struct i915_active_request *active,
+                       struct mutex *mutex)
+{
+       return i915_request_get(i915_active_request_peek(active, mutex));
+}
+
+/**
+ * __i915_active_request_get_rcu - return a reference to the active request
+ * @active - the active tracker
+ *
+ * __i915_active_request_get_rcu() returns a reference to the active request,
+ * or NULL if the active tracker is idle. The caller must hold the RCU read
+ * lock, but the returned pointer is safe to use outside of RCU.
+ */
+static inline struct i915_request *
+__i915_active_request_get_rcu(const struct i915_active_request *active)
+{
+       /*
+        * Performing a lockless retrieval of the active request is super
+        * tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing
+        * slab of request objects will not be freed whilst we hold the
+        * RCU read lock. It does not guarantee that the request itself
+        * will not be freed and then *reused*. Viz,
+        *
+        * Thread A                     Thread B
+        *
+        * rq = active.request
+        *                              retire(rq) -> free(rq);
+        *                              (rq is now first on the slab freelist)
+        *                              active.request = NULL
+        *
+        *                              rq = new submission on a new object
+        * ref(rq)
+        *
+        * To prevent the request from being reused whilst the caller
+        * uses it, we take a reference like normal. Whilst acquiring
+        * the reference we check that it is not in a destroyed state
+        * (refcnt == 0). That prevents the request being reallocated
+        * whilst the caller holds on to it. To check that the request
+        * was not reallocated as we acquired the reference we have to
+        * check that our request remains the active request across
+        * the lookup, in the same manner as a seqlock. The visibility
+        * of the pointer versus the reference counting is controlled
+        * by using RCU barriers (rcu_dereference and rcu_assign_pointer).
+        *
+        * In the middle of all that, we inspect whether the request is
+        * complete. Retiring is lazy so the request may be completed long
+        * before the active tracker is updated. Querying whether the
+        * request is complete is far cheaper (as it involves no locked
+        * instructions setting cachelines to exclusive) than acquiring
+        * the reference, so we do it first. The RCU read lock ensures the
+        * pointer dereference is valid, but does not ensure that the
+        * seqno nor HWS is the right one! However, if the request was
+        * reallocated, that means the active tracker's request was complete.
+        * If the new request is also complete, then both are and we can
+        * just report the active tracker is idle. If the new request is
+        * incomplete, then we acquire a reference on it and check that
+        * it remained the active request.
+        *
+        * It is then imperative that we do not zero the request on
+        * reallocation, so that we can chase the dangling pointers!
+        * See i915_request_alloc().
+        */
+       do {
+               struct i915_request *request;
+
+               request = rcu_dereference(active->request);
+               if (!request || i915_request_completed(request))
+                       return NULL;
+
+               /*
+                * An especially silly compiler could decide to recompute the
+                * result of i915_request_completed, more specifically
+                * re-emit the load for request->fence.seqno. A race would catch
+                * a later seqno value, which could flip the result from true to
+                * false. Which means part of the instructions below might not
+                * be executed, while later on instructions are executed. Due to
+                * barriers within the refcounting the inconsistency can't reach
+                * past the call to i915_request_get_rcu, but not executing
+                * that while still executing i915_request_put() creates
+                * havoc enough.  Prevent this with a compiler barrier.
+                */
+               barrier();
+
+               request = i915_request_get_rcu(request);
+
+               /*
+                * What stops the following rcu_access_pointer() from occurring
+                * before the above i915_request_get_rcu()? If we were
+                * to read the value before pausing to get the reference to
+                * the request, we may not notice a change in the active
+                * tracker.
+                *
+                * The rcu_access_pointer() is a mere compiler barrier, which
+                * means both the CPU and compiler are free to perform the
+                * memory read without constraint. The compiler only has to
+                * ensure that any operations after the rcu_access_pointer()
+                * occur afterwards in program order. This means the read may
+                * be performed earlier by an out-of-order CPU, or adventurous
+                * compiler.
+                *
+                * The atomic operation at the heart of
+                * i915_request_get_rcu(), see dma_fence_get_rcu(), is
+                * atomic_inc_not_zero() which is only a full memory barrier
+                * when successful. That is, if i915_request_get_rcu()
+                * returns the request (and so with the reference counted
+                * incremented) then the following read for rcu_access_pointer()
+                * must occur after the atomic operation and so confirm
+                * that this request is the one currently being tracked.
+                *
+                * The corresponding write barrier is part of
+                * rcu_assign_pointer().
+                */
+               if (!request || request == rcu_access_pointer(active->request))
+                       return rcu_pointer_handoff(request);
+
+               i915_request_put(request);
+       } while (1);
+}
+
+/**
+ * i915_active_request_get_unlocked - return a reference to the active request
+ * @active - the active tracker
+ *
+ * i915_active_request_get_unlocked() returns a reference to the active request,
+ * or NULL if the active tracker is idle. The reference is obtained under RCU,
+ * so no locking is required by the caller.
+ *
+ * The reference should be freed with i915_request_put().
+ */
+static inline struct i915_request *
+i915_active_request_get_unlocked(const struct i915_active_request *active)
+{
+       struct i915_request *request;
+
+       rcu_read_lock();
+       request = __i915_active_request_get_rcu(active);
+       rcu_read_unlock();
+
+       return request;
+}
+
+/**
+ * i915_active_request_isset - report whether the active tracker is assigned
+ * @active - the active tracker
+ *
+ * i915_active_request_isset() returns true if the active tracker is currently
+ * assigned to a request. Due to the lazy retiring, that request may be idle
+ * and this may report stale information.
+ */
+static inline bool
+i915_active_request_isset(const struct i915_active_request *active)
+{
+       return rcu_access_pointer(active->request);
+}
+
+/**
+ * i915_active_request_retire - waits until the request is retired
+ * @active - the active request on which to wait
+ *
+ * i915_active_request_retire() waits until the request is completed,
+ * and then ensures that at least the retirement handler for this
+ * @active tracker is called before returning. If the @active
+ * tracker is idle, the function returns immediately.
+ */
+static inline int __must_check
+i915_active_request_retire(struct i915_active_request *active,
+                          struct mutex *mutex)
+{
+       struct i915_request *request;
+       long ret;
+
+       request = i915_active_request_raw(active, mutex);
+       if (!request)
+               return 0;
+
+       ret = i915_request_wait(request,
+                               I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+                               MAX_SCHEDULE_TIMEOUT);
+       if (ret < 0)
+               return ret;
+
+       list_del_init(&active->link);
+       RCU_INIT_POINTER(active->request, NULL);
+
+       active->retire(active, request);
+
+       return 0;
+}
+
+/*
+ * GPU activity tracking
+ *
+ * Each set of commands submitted to the GPU comprises a single request that
+ * signals a fence upon completion. struct i915_request combines the
+ * command submission, scheduling and fence signaling roles. If we want to see
+ * if a particular task is complete, we need to grab the fence (struct
+ * i915_request) for that task and check or wait for it to be signaled. More
+ * often though we want to track the status of a bunch of tasks, for example
+ * to wait for the GPU to finish accessing some memory across a variety of
+ * different command pipelines from different clients. We could choose to
+ * track every single request associated with the task, but knowing that
+ * each request belongs to an ordered timeline (later requests within a
+ * timeline must wait for earlier requests), we need only track the
+ * latest request in each timeline to determine the overall status of the
+ * task.
+ *
+ * struct i915_active provides this tracking across timelines. It builds a
+ * composite shared-fence, and is updated as new work is submitted to the task,
+ * forming a snapshot of the current status. It should be embedded into the
+ * different resources that need to track their associated GPU activity to
+ * provide a callback when that GPU activity has ceased, or otherwise to
+ * provide a serialisation point either for request submission or for CPU
+ * synchronisation.
+ */
+
+void i915_active_init(struct drm_i915_private *i915,
+                     struct i915_active *ref,
+                     void (*retire)(struct i915_active *ref));
+
+int i915_active_ref(struct i915_active *ref,
+                   u64 timeline,
+                   struct i915_request *rq);
+
+int i915_active_wait(struct i915_active *ref);
+
+int i915_request_await_active(struct i915_request *rq,
+                             struct i915_active *ref);
+int i915_request_await_active_request(struct i915_request *rq,
+                                     struct i915_active_request *active);
+
+bool i915_active_acquire(struct i915_active *ref);
+
+static inline void i915_active_cancel(struct i915_active *ref)
+{
+       GEM_BUG_ON(ref->count != 1);
+       ref->count = 0;
+}
+
+void i915_active_release(struct i915_active *ref);
+
+static inline bool
+i915_active_is_idle(const struct i915_active *ref)
+{
+       return !ref->count;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+void i915_active_fini(struct i915_active *ref);
+#else
+static inline void i915_active_fini(struct i915_active *ref) { }
+#endif
+
+int i915_global_active_init(void);
+void i915_global_active_exit(void);
+
+#endif /* _I915_ACTIVE_H_ */
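
The long comment in __i915_active_request_get_rcu() is the subtle part of this header; the loop below is a distilled, generic model of that acquire pattern (kfree_rcu stands in for the SLAB_TYPESAFE_BY_RCU reuse the real code has to tolerate, and all names here are invented):

#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct tracked {
	struct kref ref;
	struct rcu_head rcu;
};

static void tracked_release(struct kref *ref)
{
	kfree_rcu(container_of(ref, struct tracked, ref), rcu);
}

static struct tracked *tracked_get_rcu(struct tracked __rcu **slot)
{
	struct tracked *t;

	rcu_read_lock();
	do {
		t = rcu_dereference(*slot);
		if (!t)
			break;			/* slot is idle */

		/* The object may be mid-release: only take a reference
		 * if the refcount has not already hit zero. */
		if (!kref_get_unless_zero(&t->ref)) {
			t = NULL;
			break;
		}

		/* Seqlock-style recheck: still the tracked object? */
		if (t == rcu_access_pointer(*slot))
			break;			/* yes: hand it out */

		kref_put(&t->ref, tracked_release);	/* raced; retry */
	} while (1);
	rcu_read_unlock();

	return t;
}
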
diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h
new file mode 100644 (file)
index 0000000..b679253
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef _I915_ACTIVE_TYPES_H_
+#define _I915_ACTIVE_TYPES_H_
+
+#include <linux/rbtree.h>
+#include <linux/rcupdate.h>
+
+struct drm_i915_private;
+struct i915_active_request;
+struct i915_request;
+
+typedef void (*i915_active_retire_fn)(struct i915_active_request *,
+                                     struct i915_request *);
+
+struct i915_active_request {
+       struct i915_request __rcu *request;
+       struct list_head link;
+       i915_active_retire_fn retire;
+};
+
+struct i915_active {
+       struct drm_i915_private *i915;
+
+       struct rb_root tree;
+       struct i915_active_request last;
+       unsigned int count;
+
+       void (*retire)(struct i915_active *ref);
+};
+
+#endif /* _I915_ACTIVE_TYPES_H_ */
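
Putting these types together with the header's declarations, the tracker offers two serialisation points; a short hypothetical sketch (my_res as in the earlier example):

/* CPU side: flush the tracker, retiring the last request on every
 * timeline before touching the resource from the CPU. */
static int my_res_cpu_prepare(struct my_res *res)
{
	return i915_active_wait(&res->active);
}

/* GPU side: make a new request wait (via await-fences) on the last
 * request of every tracked timeline before it executes. */
static int my_res_gpu_prepare(struct my_res *res, struct i915_request *rq)
{
	return i915_request_await_active(rq, &res->active);
}
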
index 95478db9998b51a410b927d654990967c74c5fdc..33e8eed64423af5f00fac38c6ca88dccd7b7911e 100644 (file)
@@ -865,7 +865,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
        int cmd_table_count;
        int ret;
 
-       if (!IS_GEN7(engine->i915))
+       if (!IS_GEN(engine->i915, 7))
                return;
 
        switch (engine->id) {
index 38dcee1ca062483272948bce3a7d9af2b4c83a7d..0bd890c04fe4f7c911bd9bde1a79af11af08ff5c 100644 (file)
  *
  */
 
-#include <linux/debugfs.h>
 #include <linux/sort.h>
 #include <linux/sched/mm.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_fourcc.h>
 #include "intel_drv.h"
 #include "intel_guc_submission.h"
 
+#include "i915_reset.h"
+
 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
 {
        return to_i915(node->minor->dev);
@@ -48,7 +51,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
        seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
 
        intel_device_info_dump_flags(info, &p);
-       intel_device_info_dump_runtime(info, &p);
+       intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
        intel_driver_caps_print(&dev_priv->caps, &p);
 
        kernel_param_lock(THIS_MODULE);
@@ -157,14 +160,14 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
                   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
        if (obj->base.name)
                seq_printf(m, " (name: %d)", obj->base.name);
-       list_for_each_entry(vma, &obj->vma_list, obj_link) {
+       list_for_each_entry(vma, &obj->vma.list, obj_link) {
                if (i915_vma_is_pinned(vma))
                        pin_count++;
        }
        seq_printf(m, " (pinned x %d)", pin_count);
        if (obj->pin_global)
                seq_printf(m, " (global)");
-       list_for_each_entry(vma, &obj->vma_list, obj_link) {
+       list_for_each_entry(vma, &obj->vma.list, obj_link) {
                if (!drm_mm_node_allocated(&vma->node))
                        continue;
 
@@ -204,7 +207,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
                if (vma->fence)
                        seq_printf(m, " , fence: %d%s",
                                   vma->fence->id,
-                                  i915_gem_active_isset(&vma->last_fence) ? "*" : "");
+                                  i915_active_request_isset(&vma->last_fence) ? "*" : "");
                seq_puts(m, ")");
        }
        if (obj->stolen)
@@ -297,11 +300,12 @@ out:
 }
 
 struct file_stats {
-       struct drm_i915_file_private *file_priv;
+       struct i915_address_space *vm;
        unsigned long count;
        u64 total, unbound;
        u64 global, shared;
        u64 active, inactive;
+       u64 closed;
 };
 
 static int per_file_stats(int id, void *ptr, void *data)
@@ -319,16 +323,14 @@ static int per_file_stats(int id, void *ptr, void *data)
        if (obj->base.name || obj->base.dma_buf)
                stats->shared += obj->base.size;
 
-       list_for_each_entry(vma, &obj->vma_list, obj_link) {
+       list_for_each_entry(vma, &obj->vma.list, obj_link) {
                if (!drm_mm_node_allocated(&vma->node))
                        continue;
 
                if (i915_vma_is_ggtt(vma)) {
                        stats->global += vma->node.size;
                } else {
-                       struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
-
-                       if (ppgtt->vm.file != stats->file_priv)
+                       if (vma->vm != stats->vm)
                                continue;
                }
 
@@ -336,6 +338,9 @@ static int per_file_stats(int id, void *ptr, void *data)
                        stats->active += vma->node.size;
                else
                        stats->inactive += vma->node.size;
+
+               if (i915_vma_is_closed(vma))
+                       stats->closed += vma->node.size;
        }
 
        return 0;
@@ -343,7 +348,7 @@ static int per_file_stats(int id, void *ptr, void *data)
 
 #define print_file_stats(m, name, stats) do { \
        if (stats.count) \
-               seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
+               seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
                           name, \
                           stats.count, \
                           stats.total, \
@@ -351,20 +356,19 @@ static int per_file_stats(int id, void *ptr, void *data)
                           stats.inactive, \
                           stats.global, \
                           stats.shared, \
-                          stats.unbound); \
+                          stats.unbound, \
+                          stats.closed); \
 } while (0)
 
 static void print_batch_pool_stats(struct seq_file *m,
                                   struct drm_i915_private *dev_priv)
 {
        struct drm_i915_gem_object *obj;
-       struct file_stats stats;
        struct intel_engine_cs *engine;
+       struct file_stats stats = {};
        enum intel_engine_id id;
        int j;
 
-       memset(&stats, 0, sizeof(stats));
-
        for_each_engine(engine, dev_priv, id) {
                for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
                        list_for_each_entry(obj,
@@ -377,44 +381,47 @@ static void print_batch_pool_stats(struct seq_file *m,
        print_file_stats(m, "[k]batch pool", stats);
 }
 
-static int per_file_ctx_stats(int idx, void *ptr, void *data)
+static void print_context_stats(struct seq_file *m,
+                               struct drm_i915_private *i915)
 {
-       struct i915_gem_context *ctx = ptr;
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
+       struct file_stats kstats = {};
+       struct i915_gem_context *ctx;
 
-       for_each_engine(engine, ctx->i915, id) {
-               struct intel_context *ce = to_intel_context(ctx, engine);
+       list_for_each_entry(ctx, &i915->contexts.list, link) {
+               struct intel_engine_cs *engine;
+               enum intel_engine_id id;
 
-               if (ce->state)
-                       per_file_stats(0, ce->state->obj, data);
-               if (ce->ring)
-                       per_file_stats(0, ce->ring->vma->obj, data);
-       }
+               for_each_engine(engine, i915, id) {
+                       struct intel_context *ce = to_intel_context(ctx, engine);
 
-       return 0;
-}
+                       if (ce->state)
+                               per_file_stats(0, ce->state->obj, &kstats);
+                       if (ce->ring)
+                               per_file_stats(0, ce->ring->vma->obj, &kstats);
+               }
 
-static void print_context_stats(struct seq_file *m,
-                               struct drm_i915_private *dev_priv)
-{
-       struct drm_device *dev = &dev_priv->drm;
-       struct file_stats stats;
-       struct drm_file *file;
+               if (!IS_ERR_OR_NULL(ctx->file_priv)) {
+                       struct file_stats stats = { .vm = &ctx->ppgtt->vm, };
+                       struct drm_file *file = ctx->file_priv->file;
+                       struct task_struct *task;
+                       char name[80];
 
-       memset(&stats, 0, sizeof(stats));
+                       spin_lock(&file->table_lock);
+                       idr_for_each(&file->object_idr, per_file_stats, &stats);
+                       spin_unlock(&file->table_lock);
 
-       mutex_lock(&dev->struct_mutex);
-       if (dev_priv->kernel_context)
-               per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
+                       rcu_read_lock();
+                       task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
+                       snprintf(name, sizeof(name), "%s/%d",
+                                task ? task->comm : "<unknown>",
+                                ctx->user_handle);
+                       rcu_read_unlock();
 
-       list_for_each_entry(file, &dev->filelist, lhead) {
-               struct drm_i915_file_private *fpriv = file->driver_priv;
-               idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
+                       print_file_stats(m, name, stats);
+               }
        }
-       mutex_unlock(&dev->struct_mutex);
 
-       print_file_stats(m, "[k]contexts", stats);
+       print_file_stats(m, "[k]contexts", kstats);
 }
 
 static int i915_gem_object_info(struct seq_file *m, void *data)
@@ -426,14 +433,9 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
        u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
        struct drm_i915_gem_object *obj;
        unsigned int page_sizes = 0;
-       struct drm_file *file;
        char buf[80];
        int ret;
 
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
-
        seq_printf(m, "%u objects, %llu bytes\n",
                   dev_priv->mm.object_count,
                   dev_priv->mm.object_memory);
@@ -514,43 +516,14 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
                                        buf, sizeof(buf)));
 
        seq_putc(m, '\n');
-       print_batch_pool_stats(m, dev_priv);
-       mutex_unlock(&dev->struct_mutex);
-
-       mutex_lock(&dev->filelist_mutex);
-       print_context_stats(m, dev_priv);
-       list_for_each_entry_reverse(file, &dev->filelist, lhead) {
-               struct file_stats stats;
-               struct drm_i915_file_private *file_priv = file->driver_priv;
-               struct i915_request *request;
-               struct task_struct *task;
-
-               mutex_lock(&dev->struct_mutex);
 
-               memset(&stats, 0, sizeof(stats));
-               stats.file_priv = file->driver_priv;
-               spin_lock(&file->table_lock);
-               idr_for_each(&file->object_idr, per_file_stats, &stats);
-               spin_unlock(&file->table_lock);
-               /*
-                * Although we have a valid reference on file->pid, that does
-                * not guarantee that the task_struct who called get_pid() is
-                * still alive (e.g. get_pid(current) => fork() => exit()).
-                * Therefore, we need to protect this ->comm access using RCU.
-                */
-               request = list_first_entry_or_null(&file_priv->mm.request_list,
-                                                  struct i915_request,
-                                                  client_link);
-               rcu_read_lock();
-               task = pid_task(request && request->gem_context->pid ?
-                               request->gem_context->pid : file->pid,
-                               PIDTYPE_PID);
-               print_file_stats(m, task ? task->comm : "<unknown>", stats);
-               rcu_read_unlock();
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
 
-               mutex_unlock(&dev->struct_mutex);
-       }
-       mutex_unlock(&dev->filelist_mutex);
+       print_batch_pool_stats(m, dev_priv);
+       print_context_stats(m, dev_priv);
+       mutex_unlock(&dev->struct_mutex);
 
        return 0;
 }
@@ -656,10 +629,12 @@ static void gen8_display_interrupt_info(struct seq_file *m)
 
        for_each_pipe(dev_priv, pipe) {
                enum intel_display_power_domain power_domain;
+               intel_wakeref_t wakeref;
 
                power_domain = POWER_DOMAIN_PIPE(pipe);
-               if (!intel_display_power_get_if_enabled(dev_priv,
-                                                       power_domain)) {
+               wakeref = intel_display_power_get_if_enabled(dev_priv,
+                                                            power_domain);
+               if (!wakeref) {
                        seq_printf(m, "Pipe %c power disabled\n",
                                   pipe_name(pipe));
                        continue;
@@ -674,7 +649,7 @@ static void gen8_display_interrupt_info(struct seq_file *m)
                           pipe_name(pipe),
                           I915_READ(GEN8_DE_PIPE_IER(pipe)));
 
-               intel_display_power_put(dev_priv, power_domain);
+               intel_display_power_put(dev_priv, power_domain, wakeref);
        }
 
        seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
@@ -704,11 +679,14 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
+       intel_wakeref_t wakeref;
        int i, pipe;
 
-       intel_runtime_pm_get(dev_priv);
+       wakeref = intel_runtime_pm_get(dev_priv);
 
        if (IS_CHERRYVIEW(dev_priv)) {
+               intel_wakeref_t pref;
+
                seq_printf(m, "Master Interrupt Control:\t%08x\n",
                           I915_READ(GEN8_MASTER_IRQ));
 
@@ -724,8 +702,9 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                        enum intel_display_power_domain power_domain;
 
                        power_domain = POWER_DOMAIN_PIPE(pipe);
-                       if (!intel_display_power_get_if_enabled(dev_priv,
-                                                               power_domain)) {
+                       pref = intel_display_power_get_if_enabled(dev_priv,
+                                                                 power_domain);
+                       if (!pref) {
                                seq_printf(m, "Pipe %c power disabled\n",
                                           pipe_name(pipe));
                                continue;
@@ -735,17 +714,17 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                                   pipe_name(pipe),
                                   I915_READ(PIPESTAT(pipe)));
 
-                       intel_display_power_put(dev_priv, power_domain);
+                       intel_display_power_put(dev_priv, power_domain, pref);
                }
 
-               intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+               pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
                seq_printf(m, "Port hotplug:\t%08x\n",
                           I915_READ(PORT_HOTPLUG_EN));
                seq_printf(m, "DPFLIPSTAT:\t%08x\n",
                           I915_READ(VLV_DPFLIPSTAT));
                seq_printf(m, "DPINVGTT:\t%08x\n",
                           I915_READ(DPINVGTT));
-               intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+               intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
 
                for (i = 0; i < 4; i++) {
                        seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
@@ -808,10 +787,12 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                           I915_READ(VLV_IMR));
                for_each_pipe(dev_priv, pipe) {
                        enum intel_display_power_domain power_domain;
+                       intel_wakeref_t pref;
 
                        power_domain = POWER_DOMAIN_PIPE(pipe);
-                       if (!intel_display_power_get_if_enabled(dev_priv,
-                                                               power_domain)) {
+                       pref = intel_display_power_get_if_enabled(dev_priv,
+                                                                 power_domain);
+                       if (!pref) {
                                seq_printf(m, "Pipe %c power disabled\n",
                                           pipe_name(pipe));
                                continue;
@@ -820,7 +801,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                        seq_printf(m, "Pipe %c stat:\t%08x\n",
                                   pipe_name(pipe),
                                   I915_READ(PIPESTAT(pipe)));
-                       intel_display_power_put(dev_priv, power_domain);
+                       intel_display_power_put(dev_priv, power_domain, pref);
                }
 
                seq_printf(m, "Master IER:\t%08x\n",
@@ -907,7 +888,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                }
        }
 
-       intel_runtime_pm_put(dev_priv);
+       intel_runtime_pm_put(dev_priv, wakeref);
 
        return 0;
 }
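
The bulk of this patch is one mechanical conversion, visible throughout i915_interrupt_info() above: intel_runtime_pm_get() and intel_display_power_get*() now return an intel_wakeref_t cookie, and the matching put takes that cookie back, so every release is tied to a specific acquisition and an unbalanced pair can be traced. A minimal userspace sketch of the cookie discipline, with invented names and internals rather than the i915 implementation:

#include <assert.h>
#include <stdatomic.h>

typedef unsigned long wakeref_t;

struct pm {
	atomic_int count;		/* outstanding references */
	atomic_ulong next_cookie;	/* source of unique, non-zero cookies */
};

static wakeref_t pm_get(struct pm *pm)
{
	atomic_fetch_add(&pm->count, 1);	/* hold the device awake */
	return atomic_fetch_add(&pm->next_cookie, 1) + 1;
}

static void pm_put(struct pm *pm, wakeref_t wref)
{
	assert(wref != 0);	/* catches a put without a matching get */
	atomic_fetch_sub(&pm->count, 1);
}
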
@@ -980,12 +961,13 @@ static int i915_gpu_info_open(struct inode *inode, struct file *file)
 {
        struct drm_i915_private *i915 = inode->i_private;
        struct i915_gpu_state *gpu;
+       intel_wakeref_t wakeref;
 
-       intel_runtime_pm_get(i915);
-       gpu = i915_capture_gpu_state(i915);
-       intel_runtime_pm_put(i915);
-       if (!gpu)
-               return -ENOMEM;
+       gpu = NULL;
+       with_intel_runtime_pm(i915, wakeref)
+               gpu = i915_capture_gpu_state(i915);
+       if (IS_ERR(gpu))
+               return PTR_ERR(gpu);
 
        file->private_data = gpu;
        return 0;
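
Call sites that only need power across one statement or block (the gpu state capture above; hangcheck, emon, and GuC/HuC status below) switch to with_intel_runtime_pm(i915, wakeref) instead of a hand-written get/put pair. One plausible shape for such a scoped helper, reusing the hypothetical pm_get()/pm_put() sketched above, is a single-iteration for loop; since the put sits in the loop's advance expression, a bare break or return from the body would skip it:

/* hypothetical scoped-acquire macro; the real i915 definition may differ */
#define with_pm(pm, wf) \
	for ((wf) = pm_get(pm); (wf); pm_put((pm), (wf)), (wf) = 0)

/* usage:
 *	wakeref_t wf;
 *
 *	with_pm(&dev_pm, wf)
 *		snapshot = capture_state();
 */
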
@@ -1018,7 +1000,13 @@ i915_error_state_write(struct file *filp,
 
 static int i915_error_state_open(struct inode *inode, struct file *file)
 {
-       file->private_data = i915_first_error_state(inode->i_private);
+       struct i915_gpu_state *error;
+
+       error = i915_first_error_state(inode->i_private);
+       if (IS_ERR(error))
+               return PTR_ERR(error);
+
+       file->private_data = error;
        return 0;
 }
 
@@ -1032,39 +1020,16 @@ static const struct file_operations i915_error_state_fops = {
 };
 #endif
 
-static int
-i915_next_seqno_set(void *data, u64 val)
-{
-       struct drm_i915_private *dev_priv = data;
-       struct drm_device *dev = &dev_priv->drm;
-       int ret;
-
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
-
-       intel_runtime_pm_get(dev_priv);
-       ret = i915_gem_set_global_seqno(dev, val);
-       intel_runtime_pm_put(dev_priv);
-
-       mutex_unlock(&dev->struct_mutex);
-
-       return ret;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
-                       NULL, i915_next_seqno_set,
-                       "0x%llx\n");
-
 static int i915_frequency_info(struct seq_file *m, void *unused)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
+       intel_wakeref_t wakeref;
        int ret = 0;
 
-       intel_runtime_pm_get(dev_priv);
+       wakeref = intel_runtime_pm_get(dev_priv);
 
-       if (IS_GEN5(dev_priv)) {
+       if (IS_GEN(dev_priv, 5)) {
                u16 rgvswctl = I915_READ16(MEMSWCTL);
                u16 rgvstat = I915_READ16(MEMSTAT_ILK);
 
@@ -1274,7 +1239,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
        seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
        seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
 
-       intel_runtime_pm_put(dev_priv);
+       intel_runtime_pm_put(dev_priv, wakeref);
        return ret;
 }
 
@@ -1313,14 +1278,13 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
        u64 acthd[I915_NUM_ENGINES];
        u32 seqno[I915_NUM_ENGINES];
        struct intel_instdone instdone;
+       intel_wakeref_t wakeref;
        enum intel_engine_id id;
 
        if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
                seq_puts(m, "Wedged\n");
        if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
                seq_puts(m, "Reset in progress: struct_mutex backoff\n");
-       if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
-               seq_puts(m, "Reset in progress: reset handoff to waiter\n");
        if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
                seq_puts(m, "Waiter holding struct mutex\n");
        if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
@@ -1331,17 +1295,15 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
                return 0;
        }
 
-       intel_runtime_pm_get(dev_priv);
+       with_intel_runtime_pm(dev_priv, wakeref) {
+               for_each_engine(engine, dev_priv, id) {
+                       acthd[id] = intel_engine_get_active_head(engine);
+                       seqno[id] = intel_engine_get_seqno(engine);
+               }
 
-       for_each_engine(engine, dev_priv, id) {
-               acthd[id] = intel_engine_get_active_head(engine);
-               seqno[id] = intel_engine_get_seqno(engine);
+               intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
        }
 
-       intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
-
-       intel_runtime_pm_put(dev_priv);
-
        if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
                seq_printf(m, "Hangcheck active, timer fires in %dms\n",
                           jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
@@ -1354,37 +1316,16 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
        seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));
 
        for_each_engine(engine, dev_priv, id) {
-               struct intel_breadcrumbs *b = &engine->breadcrumbs;
-               struct rb_node *rb;
-
                seq_printf(m, "%s:\n", engine->name);
-               seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
+               seq_printf(m, "\tseqno = %x [current %x, last %x], %dms ago\n",
                           engine->hangcheck.seqno, seqno[id],
-                          intel_engine_last_submit(engine));
-               seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s, wedged? %s\n",
-                          yesno(intel_engine_has_waiter(engine)),
-                          yesno(test_bit(engine->id,
-                                         &dev_priv->gpu_error.missed_irq_rings)),
-                          yesno(engine->hangcheck.stalled),
-                          yesno(engine->hangcheck.wedged));
-
-               spin_lock_irq(&b->rb_lock);
-               for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
-                       struct intel_wait *w = rb_entry(rb, typeof(*w), node);
-
-                       seq_printf(m, "\t%s [%d] waiting for %x\n",
-                                  w->tsk->comm, w->tsk->pid, w->seqno);
-               }
-               spin_unlock_irq(&b->rb_lock);
+                          intel_engine_last_submit(engine),
+                          jiffies_to_msecs(jiffies -
+                                           engine->hangcheck.action_timestamp));
 
                seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
                           (long long)engine->hangcheck.acthd,
                           (long long)acthd[id]);
-               seq_printf(m, "\taction = %s(%d) %d ms ago\n",
-                          hangcheck_action_to_str(engine->hangcheck.action),
-                          engine->hangcheck.action,
-                          jiffies_to_msecs(jiffies -
-                                           engine->hangcheck.action_timestamp));
 
                if (engine->id == RCS) {
                        seq_puts(m, "\tinstdone read =\n");
@@ -1616,18 +1557,17 @@ static int gen6_drpc_info(struct seq_file *m)
 static int i915_drpc_info(struct seq_file *m, void *unused)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
-       int err;
-
-       intel_runtime_pm_get(dev_priv);
-
-       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               err = vlv_drpc_info(m);
-       else if (INTEL_GEN(dev_priv) >= 6)
-               err = gen6_drpc_info(m);
-       else
-               err = ironlake_drpc_info(m);
-
-       intel_runtime_pm_put(dev_priv);
+       intel_wakeref_t wakeref;
+       int err = -ENODEV;
+
+       with_intel_runtime_pm(dev_priv, wakeref) {
+               if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+                       err = vlv_drpc_info(m);
+               else if (INTEL_GEN(dev_priv) >= 6)
+                       err = gen6_drpc_info(m);
+               else
+                       err = ironlake_drpc_info(m);
+       }
 
        return err;
 }
@@ -1649,11 +1589,12 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_fbc *fbc = &dev_priv->fbc;
+       intel_wakeref_t wakeref;
 
        if (!HAS_FBC(dev_priv))
                return -ENODEV;
 
-       intel_runtime_pm_get(dev_priv);
+       wakeref = intel_runtime_pm_get(dev_priv);
        mutex_lock(&fbc->lock);
 
        if (intel_fbc_is_active(dev_priv))
@@ -1680,7 +1621,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
        }
 
        mutex_unlock(&fbc->lock);
-       intel_runtime_pm_put(dev_priv);
+       intel_runtime_pm_put(dev_priv, wakeref);
 
        return 0;
 }
@@ -1725,11 +1666,12 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
 static int i915_ips_status(struct seq_file *m, void *unused)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       intel_wakeref_t wakeref;
 
        if (!HAS_IPS(dev_priv))
                return -ENODEV;
 
-       intel_runtime_pm_get(dev_priv);
+       wakeref = intel_runtime_pm_get(dev_priv);
 
        seq_printf(m, "Enabled by kernel parameter: %s\n",
                   yesno(i915_modparams.enable_ips));
@@ -1743,7 +1685,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
                        seq_puts(m, "Currently: disabled\n");
        }
 
-       intel_runtime_pm_put(dev_priv);
+       intel_runtime_pm_put(dev_priv, wakeref);
 
        return 0;
 }
@@ -1751,10 +1693,10 @@ static int i915_ips_status(struct seq_file *m, void *unused)
 static int i915_sr_status(struct seq_file *m, void *unused)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       intel_wakeref_t wakeref;
        bool sr_enabled = false;
 
-       intel_runtime_pm_get(dev_priv);
-       intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+       wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
 
        if (INTEL_GEN(dev_priv) >= 9)
                /* no global SR status; inspect per-plane WM */;
@@ -1770,8 +1712,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
        else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
 
-       intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
-       intel_runtime_pm_put(dev_priv);
+       intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
 
        seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
 
@@ -1780,31 +1721,24 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 
 static int i915_emon_status(struct seq_file *m, void *unused)
 {
-       struct drm_i915_private *dev_priv = node_to_i915(m->private);
-       struct drm_device *dev = &dev_priv->drm;
-       unsigned long temp, chipset, gfx;
-       int ret;
+       struct drm_i915_private *i915 = node_to_i915(m->private);
+       intel_wakeref_t wakeref;
 
-       if (!IS_GEN5(dev_priv))
+       if (!IS_GEN(i915, 5))
                return -ENODEV;
 
-       intel_runtime_pm_get(dev_priv);
+       with_intel_runtime_pm(i915, wakeref) {
+               unsigned long temp, chipset, gfx;
 
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
-
-       temp = i915_mch_val(dev_priv);
-       chipset = i915_chipset_val(dev_priv);
-       gfx = i915_gfx_val(dev_priv);
-       mutex_unlock(&dev->struct_mutex);
+               temp = i915_mch_val(i915);
+               chipset = i915_chipset_val(i915);
+               gfx = i915_gfx_val(i915);
 
-       seq_printf(m, "GMCH temp: %ld\n", temp);
-       seq_printf(m, "Chipset power: %ld\n", chipset);
-       seq_printf(m, "GFX power: %ld\n", gfx);
-       seq_printf(m, "Total power: %ld\n", chipset + gfx);
-
-       intel_runtime_pm_put(dev_priv);
+               seq_printf(m, "GMCH temp: %ld\n", temp);
+               seq_printf(m, "Chipset power: %ld\n", chipset);
+               seq_printf(m, "GFX power: %ld\n", gfx);
+               seq_printf(m, "Total power: %ld\n", chipset + gfx);
+       }
 
        return 0;
 }
@@ -1814,13 +1748,14 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
        unsigned int max_gpu_freq, min_gpu_freq;
+       intel_wakeref_t wakeref;
        int gpu_freq, ia_freq;
        int ret;
 
        if (!HAS_LLC(dev_priv))
                return -ENODEV;
 
-       intel_runtime_pm_get(dev_priv);
+       wakeref = intel_runtime_pm_get(dev_priv);
 
        ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
        if (ret)
@@ -1853,7 +1788,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
        mutex_unlock(&dev_priv->pcu_lock);
 
 out:
-       intel_runtime_pm_put(dev_priv);
+       intel_runtime_pm_put(dev_priv, wakeref);
        return ret;
 }
 
@@ -2026,15 +1961,16 @@ static const char *swizzle_string(unsigned swizzle)
 static int i915_swizzle_info(struct seq_file *m, void *data)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       intel_wakeref_t wakeref;
 
-       intel_runtime_pm_get(dev_priv);
+       wakeref = intel_runtime_pm_get(dev_priv);
 
        seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
                   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
        seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
                   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
 
-       if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
+       if (IS_GEN_RANGE(dev_priv, 3, 4)) {
                seq_printf(m, "DDC = 0x%08x\n",
                           I915_READ(DCC));
                seq_printf(m, "DDC2 = 0x%08x\n",
@@ -2065,141 +2001,11 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
        if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
                seq_puts(m, "L-shaped memory detected\n");
 
-       intel_runtime_pm_put(dev_priv);
+       intel_runtime_pm_put(dev_priv, wakeref);
 
        return 0;
 }
 
-static int per_file_ctx(int id, void *ptr, void *data)
-{
-       struct i915_gem_context *ctx = ptr;
-       struct seq_file *m = data;
-       struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
-
-       if (!ppgtt) {
-               seq_printf(m, "  no ppgtt for context %d\n",
-                          ctx->user_handle);
-               return 0;
-       }
-
-       if (i915_gem_context_is_default(ctx))
-               seq_puts(m, "  default context:\n");
-       else
-               seq_printf(m, "  context %d:\n", ctx->user_handle);
-       ppgtt->debug_dump(ppgtt, m);
-
-       return 0;
-}
-
-static void gen8_ppgtt_info(struct seq_file *m,
-                           struct drm_i915_private *dev_priv)
-{
-       struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
-       int i;
-
-       if (!ppgtt)
-               return;
-
-       for_each_engine(engine, dev_priv, id) {
-               seq_printf(m, "%s\n", engine->name);
-               for (i = 0; i < 4; i++) {
-                       u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
-                       pdp <<= 32;
-                       pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
-                       seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
-               }
-       }
-}
-
-static void gen6_ppgtt_info(struct seq_file *m,
-                           struct drm_i915_private *dev_priv)
-{
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
-
-       if (IS_GEN6(dev_priv))
-               seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
-
-       for_each_engine(engine, dev_priv, id) {
-               seq_printf(m, "%s\n", engine->name);
-               if (IS_GEN7(dev_priv))
-                       seq_printf(m, "GFX_MODE: 0x%08x\n",
-                                  I915_READ(RING_MODE_GEN7(engine)));
-               seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
-                          I915_READ(RING_PP_DIR_BASE(engine)));
-               seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
-                          I915_READ(RING_PP_DIR_BASE_READ(engine)));
-               seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
-                          I915_READ(RING_PP_DIR_DCLV(engine)));
-       }
-       if (dev_priv->mm.aliasing_ppgtt) {
-               struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-
-               seq_puts(m, "aliasing PPGTT:\n");
-               seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
-
-               ppgtt->debug_dump(ppgtt, m);
-       }
-
-       seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
-}
-
-static int i915_ppgtt_info(struct seq_file *m, void *data)
-{
-       struct drm_i915_private *dev_priv = node_to_i915(m->private);
-       struct drm_device *dev = &dev_priv->drm;
-       struct drm_file *file;
-       int ret;
-
-       mutex_lock(&dev->filelist_mutex);
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               goto out_unlock;
-
-       intel_runtime_pm_get(dev_priv);
-
-       if (INTEL_GEN(dev_priv) >= 8)
-               gen8_ppgtt_info(m, dev_priv);
-       else if (INTEL_GEN(dev_priv) >= 6)
-               gen6_ppgtt_info(m, dev_priv);
-
-       list_for_each_entry_reverse(file, &dev->filelist, lhead) {
-               struct drm_i915_file_private *file_priv = file->driver_priv;
-               struct task_struct *task;
-
-               task = get_pid_task(file->pid, PIDTYPE_PID);
-               if (!task) {
-                       ret = -ESRCH;
-                       goto out_rpm;
-               }
-               seq_printf(m, "\nproc: %s\n", task->comm);
-               put_task_struct(task);
-               idr_for_each(&file_priv->context_idr, per_file_ctx,
-                            (void *)(unsigned long)m);
-       }
-
-out_rpm:
-       intel_runtime_pm_put(dev_priv);
-       mutex_unlock(&dev->struct_mutex);
-out_unlock:
-       mutex_unlock(&dev->filelist_mutex);
-       return ret;
-}
-
-static int count_irq_waiters(struct drm_i915_private *i915)
-{
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
-       int count = 0;
-
-       for_each_engine(engine, i915, id)
-               count += intel_engine_has_waiter(engine);
-
-       return count;
-}
-
 static const char *rps_power_to_str(unsigned int power)
 {
        static const char * const strings[] = {
@@ -2220,9 +2026,10 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
        struct drm_device *dev = &dev_priv->drm;
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
        u32 act_freq = rps->cur_freq;
+       intel_wakeref_t wakeref;
        struct drm_file *file;
 
-       if (intel_runtime_pm_get_if_in_use(dev_priv)) {
+       with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
                if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                        mutex_lock(&dev_priv->pcu_lock);
                        act_freq = vlv_punit_read(dev_priv,
@@ -2233,13 +2040,11 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
                        act_freq = intel_get_cagf(dev_priv,
                                                  I915_READ(GEN6_RPSTAT1));
                }
-               intel_runtime_pm_put(dev_priv);
        }
 
        seq_printf(m, "RPS enabled? %d\n", rps->enabled);
        seq_printf(m, "GPU busy? %s [%d requests]\n",
                   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
-       seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
        seq_printf(m, "Boosts outstanding? %d\n",
                   atomic_read(&rps->num_waiters));
        seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
@@ -2316,6 +2121,7 @@ static int i915_llc(struct seq_file *m, void *data)
 static int i915_huc_load_status_info(struct seq_file *m, void *data)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       intel_wakeref_t wakeref;
        struct drm_printer p;
 
        if (!HAS_HUC(dev_priv))
@@ -2324,9 +2130,8 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data)
        p = drm_seq_file_printer(m);
        intel_uc_fw_dump(&dev_priv->huc.fw, &p);
 
-       intel_runtime_pm_get(dev_priv);
-       seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
-       intel_runtime_pm_put(dev_priv);
+       with_intel_runtime_pm(dev_priv, wakeref)
+               seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
 
        return 0;
 }
@@ -2334,8 +2139,8 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data)
 static int i915_guc_load_status_info(struct seq_file *m, void *data)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       intel_wakeref_t wakeref;
        struct drm_printer p;
-       u32 tmp, i;
 
        if (!HAS_GUC(dev_priv))
                return -ENODEV;
@@ -2343,22 +2148,23 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
        p = drm_seq_file_printer(m);
        intel_uc_fw_dump(&dev_priv->guc.fw, &p);
 
-       intel_runtime_pm_get(dev_priv);
-
-       tmp = I915_READ(GUC_STATUS);
-
-       seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
-       seq_printf(m, "\tBootrom status = 0x%x\n",
-               (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
-       seq_printf(m, "\tuKernel status = 0x%x\n",
-               (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
-       seq_printf(m, "\tMIA Core status = 0x%x\n",
-               (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
-       seq_puts(m, "\nScratch registers:\n");
-       for (i = 0; i < 16; i++)
-               seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
-
-       intel_runtime_pm_put(dev_priv);
+       with_intel_runtime_pm(dev_priv, wakeref) {
+               u32 tmp = I915_READ(GUC_STATUS);
+               u32 i;
+
+               seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
+               seq_printf(m, "\tBootrom status = 0x%x\n",
+                          (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
+               seq_printf(m, "\tuKernel status = 0x%x\n",
+                          (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
+               seq_printf(m, "\tMIA Core status = 0x%x\n",
+                          (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
+               seq_puts(m, "\nScratch registers:\n");
+               for (i = 0; i < 16; i++) {
+                       seq_printf(m, "\t%2d: \t0x%x\n",
+                                  i, I915_READ(SOFT_SCRATCH(i)));
+               }
+       }
 
        return 0;
 }
@@ -2410,7 +2216,7 @@ static void i915_guc_client_info(struct seq_file *m,
 {
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
-       uint64_t tot = 0;
+       u64 tot = 0;
 
        seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
                client->priority, client->stage_id, client->proc_desc_offset);
@@ -2665,7 +2471,8 @@ DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
 static void
 psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
 {
-       u32 val, psr_status;
+       u32 val, status_val;
+       const char *status = "unknown";
 
        if (dev_priv->psr.psr2_enabled) {
                static const char * const live_status[] = {
@@ -2681,14 +2488,11 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
                        "BUF_ON",
                        "TG_ON"
                };
-               psr_status = I915_READ(EDP_PSR2_STATUS);
-               val = (psr_status & EDP_PSR2_STATUS_STATE_MASK) >>
-                       EDP_PSR2_STATUS_STATE_SHIFT;
-               if (val < ARRAY_SIZE(live_status)) {
-                       seq_printf(m, "Source PSR status: 0x%x [%s]\n",
-                                  psr_status, live_status[val]);
-                       return;
-               }
+               val = I915_READ(EDP_PSR2_STATUS);
+               status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
+                             EDP_PSR2_STATUS_STATE_SHIFT;
+               if (status_val < ARRAY_SIZE(live_status))
+                       status = live_status[status_val];
        } else {
                static const char * const live_status[] = {
                        "IDLE",
@@ -2700,74 +2504,102 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
                        "SRDOFFACK",
                        "SRDENT_ON",
                };
-               psr_status = I915_READ(EDP_PSR_STATUS);
-               val = (psr_status & EDP_PSR_STATUS_STATE_MASK) >>
-                       EDP_PSR_STATUS_STATE_SHIFT;
-               if (val < ARRAY_SIZE(live_status)) {
-                       seq_printf(m, "Source PSR status: 0x%x [%s]\n",
-                                  psr_status, live_status[val]);
-                       return;
-               }
+               val = I915_READ(EDP_PSR_STATUS);
+               status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
+                             EDP_PSR_STATUS_STATE_SHIFT;
+               if (status_val < ARRAY_SIZE(live_status))
+                       status = live_status[status_val];
        }
 
-       seq_printf(m, "Source PSR status: 0x%x [%s]\n", psr_status, "unknown");
+       seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
 }
 
 static int i915_edp_psr_status(struct seq_file *m, void *data)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
-       u32 psrperf = 0;
-       bool enabled = false;
-       bool sink_support;
+       struct i915_psr *psr = &dev_priv->psr;
+       intel_wakeref_t wakeref;
+       const char *status;
+       bool enabled;
+       u32 val;
 
        if (!HAS_PSR(dev_priv))
                return -ENODEV;
 
-       sink_support = dev_priv->psr.sink_support;
-       seq_printf(m, "Sink_Support: %s\n", yesno(sink_support));
-       if (!sink_support)
-               return 0;
+       seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
+       if (psr->dp)
+               seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
+       seq_puts(m, "\n");
 
-       intel_runtime_pm_get(dev_priv);
+       if (!psr->sink_support)
+               return 0;
 
-       mutex_lock(&dev_priv->psr.lock);
-       seq_printf(m, "PSR mode: %s\n",
-                  dev_priv->psr.psr2_enabled ? "PSR2" : "PSR1");
-       seq_printf(m, "Enabled: %s\n", yesno(dev_priv->psr.enabled));
-       seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
-                  dev_priv->psr.busy_frontbuffer_bits);
+       wakeref = intel_runtime_pm_get(dev_priv);
+       mutex_lock(&psr->lock);
 
-       if (dev_priv->psr.psr2_enabled)
-               enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
+       if (psr->enabled)
+               status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
        else
-               enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
+               status = "disabled";
+       seq_printf(m, "PSR mode: %s\n", status);
 
-       seq_printf(m, "Main link in standby mode: %s\n",
-                  yesno(dev_priv->psr.link_standby));
+       if (!psr->enabled)
+               goto unlock;
 
-       seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));
+       if (psr->psr2_enabled) {
+               val = I915_READ(EDP_PSR2_CTL);
+               enabled = val & EDP_PSR2_ENABLE;
+       } else {
+               val = I915_READ(EDP_PSR_CTL);
+               enabled = val & EDP_PSR_ENABLE;
+       }
+       seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
+                  enableddisabled(enabled), val);
+       psr_source_status(dev_priv, m);
+       seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
+                  psr->busy_frontbuffer_bits);
 
        /*
         * SKL+ Perf counter is reset to 0 every time DC state is entered
         */
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-               psrperf = I915_READ(EDP_PSR_PERF_CNT) &
-                       EDP_PSR_PERF_CNT_MASK;
+               val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
+               seq_printf(m, "Performance counter: %u\n", val);
+       }
 
-               seq_printf(m, "Performance_Counter: %u\n", psrperf);
+       if (psr->debug & I915_PSR_DEBUG_IRQ) {
+               seq_printf(m, "Last attempted entry at: %lld\n",
+                          psr->last_entry_attempt);
+               seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
        }
 
-       psr_source_status(dev_priv, m);
-       mutex_unlock(&dev_priv->psr.lock);
+       if (psr->psr2_enabled) {
+               u32 su_frames_val[3];
+               int frame;
 
-       if (READ_ONCE(dev_priv->psr.debug) & I915_PSR_DEBUG_IRQ) {
-               seq_printf(m, "Last attempted entry at: %lld\n",
-                          dev_priv->psr.last_entry_attempt);
-               seq_printf(m, "Last exit at: %lld\n",
-                          dev_priv->psr.last_exit);
+               /*
+                * Reading all 3 registers beforehand to minimize crossing a
+                * frame boundary between register reads
+                */
+               for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
+                       su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));
+
+               seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
+
+               for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
+                       u32 su_blocks;
+
+                       su_blocks = su_frames_val[frame / 3] &
+                                   PSR2_SU_STATUS_MASK(frame);
+                       su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
+                       seq_printf(m, "%d\t%d\n", frame, su_blocks);
+               }
        }
 
-       intel_runtime_pm_put(dev_priv);
+unlock:
+       mutex_unlock(&psr->lock);
+       intel_runtime_pm_put(dev_priv, wakeref);
+
        return 0;
 }
 
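
Two decoding idioms in the PSR hunks above deserve a closer look: psr_source_status() isolates a state field with a mask and shift, then maps it through a string table with an "unknown" fallback, while the PSR2 selective-update readout packs several per-frame block counts into each 32-bit register and snapshots all registers first so the counts straddle at most one frame boundary. A self-contained sketch of both, with invented field layouts (the real ones come from i915_reg.h):

#include <stdint.h>
#include <stdio.h>

#define STATE_MASK	0x7u	/* hypothetical 3-bit state field */
#define STATE_SHIFT	0

static const char * const state_names[] = { "IDLE", "CAPTURE", "SLEEP" };

static void print_state(uint32_t val)
{
	uint32_t field = (val & STATE_MASK) >> STATE_SHIFT;
	const char *status = "unknown";

	if (field < sizeof(state_names) / sizeof(state_names[0]))
		status = state_names[field];
	printf("status: %s [0x%08x]\n", status, val);
}

/* assumed packing: three 10-bit per-frame counts per 32-bit register */
#define SU_BITS		10
#define SU_SHIFT(f)	(((f) % 3) * SU_BITS)
#define SU_MASK(f)	(0x3ffu << SU_SHIFT(f))

static uint32_t su_blocks(const uint32_t snap[], int frame)
{
	/* snap[] was filled register-by-register beforehand, as above */
	return (snap[frame / 3] & SU_MASK(frame)) >> SU_SHIFT(frame);
}
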
@@ -2776,6 +2608,7 @@ i915_edp_psr_debug_set(void *data, u64 val)
 {
        struct drm_i915_private *dev_priv = data;
        struct drm_modeset_acquire_ctx ctx;
+       intel_wakeref_t wakeref;
        int ret;
 
        if (!CAN_PSR(dev_priv))
@@ -2783,7 +2616,7 @@ i915_edp_psr_debug_set(void *data, u64 val)
 
        DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
 
-       intel_runtime_pm_get(dev_priv);
+       wakeref = intel_runtime_pm_get(dev_priv);
 
        drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
 
@@ -2798,7 +2631,7 @@ retry:
        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
 
-       intel_runtime_pm_put(dev_priv);
+       intel_runtime_pm_put(dev_priv, wakeref);
 
        return ret;
 }
@@ -2823,24 +2656,20 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        unsigned long long power;
+       intel_wakeref_t wakeref;
        u32 units;
 
        if (INTEL_GEN(dev_priv) < 6)
                return -ENODEV;
 
-       intel_runtime_pm_get(dev_priv);
-
-       if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
-               intel_runtime_pm_put(dev_priv);
+       if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
                return -ENODEV;
-       }
 
        units = (power & 0x1f00) >> 8;
-       power = I915_READ(MCH_SECP_NRG_STTS);
-       power = (1000000 * power) >> units; /* convert to uJ */
-
-       intel_runtime_pm_put(dev_priv);
+       with_intel_runtime_pm(dev_priv, wakeref)
+               power = I915_READ(MCH_SECP_NRG_STTS);
 
+       power = (1000000 * power) >> units; /* convert to uJ */
        seq_printf(m, "%llu", power);
 
        return 0;
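
The arithmetic in i915_energy_uJ() is worth unpacking: bits 12:8 of MSR_RAPL_POWER_UNIT give the energy unit as 1/2^units joules per count, so a raw counter value becomes microjoules via (1000000 * raw) >> units. A worked sketch of just the conversion:

#include <stdint.h>
#include <stdio.h>

static uint64_t rapl_to_uJ(uint64_t unit_msr, uint64_t raw)
{
	unsigned int units = (unit_msr & 0x1f00) >> 8;	/* unit exponent */

	return (1000000ULL * raw) >> units;	/* joules -> microjoules */
}

int main(void)
{
	/* units = 14 -> 1/16384 J per count, so 32768 counts = 2 J */
	printf("%llu uJ\n", (unsigned long long)rapl_to_uJ(14 << 8, 32768));
	return 0;
}
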
@@ -2854,6 +2683,9 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
        if (!HAS_RUNTIME_PM(dev_priv))
                seq_puts(m, "Runtime power management not supported\n");
 
+       seq_printf(m, "Runtime power status: %s\n",
+                  enableddisabled(!dev_priv->power_domains.wakeref));
+
        seq_printf(m, "GPU idle: %s (epoch %u)\n",
                   yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
        seq_printf(m, "IRQs disabled: %s\n",
@@ -2868,6 +2700,12 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
                   pci_power_name(pdev->current_state),
                   pdev->current_state);
 
+       if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
+               struct drm_printer p = drm_seq_file_printer(m);
+
+               print_intel_runtime_pm_wakeref(dev_priv, &p);
+       }
+
        return 0;
 }
 
@@ -2902,6 +2740,7 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
 static int i915_dmc_info(struct seq_file *m, void *unused)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       intel_wakeref_t wakeref;
        struct intel_csr *csr;
 
        if (!HAS_CSR(dev_priv))
@@ -2909,7 +2748,7 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
 
        csr = &dev_priv->csr;
 
-       intel_runtime_pm_get(dev_priv);
+       wakeref = intel_runtime_pm_get(dev_priv);
 
        seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
        seq_printf(m, "path: %s\n", csr->fw_path);
@@ -2935,7 +2774,7 @@ out:
        seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
        seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
 
-       intel_runtime_pm_put(dev_priv);
+       intel_runtime_pm_put(dev_priv, wakeref);
 
        return 0;
 }
@@ -2948,14 +2787,7 @@ static void intel_seq_print_mode(struct seq_file *m, int tabs,
        for (i = 0; i < tabs; i++)
                seq_putc(m, '\t');
 
-       seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
-                  mode->base.id, mode->name,
-                  mode->vrefresh, mode->clock,
-                  mode->hdisplay, mode->hsync_start,
-                  mode->hsync_end, mode->htotal,
-                  mode->vdisplay, mode->vsync_start,
-                  mode->vsync_end, mode->vtotal,
-                  mode->type, mode->flags);
+       seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
 }
 
 static void intel_encoder_info(struct seq_file *m,
@@ -3127,14 +2959,13 @@ static const char *plane_type(enum drm_plane_type type)
        return "unknown";
 }
 
-static const char *plane_rotation(unsigned int rotation)
+static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
 {
-       static char buf[48];
        /*
         * According to the docs only one DRM_MODE_ROTATE_ value is allowed,
         * but print them all so any misused values are easy to spot
         */
-       snprintf(buf, sizeof(buf),
+       snprintf(buf, bufsize,
                 "%s%s%s%s%s%s(0x%08x)",
                 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
                 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
@@ -3143,8 +2974,6 @@ static const char *plane_rotation(unsigned int rotation)
                 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
                 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
                 rotation);
-
-       return buf;
 }
 
 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
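
The plane_rotation() change above is a reentrancy fix, not a cosmetic one: returning a pointer into a function-local static buffer means two concurrent readers of the debugfs file can overwrite each other's strings mid-print. Moving the buffer to the caller puts the storage on each caller's stack. Before/after in miniature, with a hypothetical formatter:

#include <stddef.h>
#include <stdio.h>

/* before: all callers share one buffer -- unsafe when called concurrently */
static const char *fmt_flags_unsafe(unsigned int v)
{
	static char buf[16];

	snprintf(buf, sizeof(buf), "(0x%08x)", v);
	return buf;
}

/* after: the caller owns the storage, as plane_rotation() now does */
static void fmt_flags(char *buf, size_t len, unsigned int v)
{
	snprintf(buf, len, "(0x%08x)", v);
}
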
@@ -3157,6 +2986,7 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
                struct drm_plane_state *state;
                struct drm_plane *plane = &intel_plane->base;
                struct drm_format_name_buf format_name;
+               char rot_str[48];
 
                if (!plane->state) {
                        seq_puts(m, "plane->state is NULL!\n");
@@ -3172,6 +3002,8 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
                        sprintf(format_name.str, "N/A");
                }
 
+               plane_rotation(rot_str, sizeof(rot_str), state->rotation);
+
                seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
                           plane->base.id,
                           plane_type(intel_plane->base.type),
@@ -3186,7 +3018,7 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
                           (state->src_h >> 16),
                           ((state->src_h & 0xffff) * 15625) >> 10,
                           format_name.str,
-                          plane_rotation(state->rotation));
+                          rot_str);
        }
 }
 
@@ -3225,8 +3057,10 @@ static int i915_display_info(struct seq_file *m, void *unused)
        struct intel_crtc *crtc;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
+       intel_wakeref_t wakeref;
+
+       wakeref = intel_runtime_pm_get(dev_priv);
 
-       intel_runtime_pm_get(dev_priv);
        seq_printf(m, "CRTC info\n");
        seq_printf(m, "---------\n");
        for_each_intel_crtc(dev, crtc) {
@@ -3274,7 +3108,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
        drm_connector_list_iter_end(&conn_iter);
        mutex_unlock(&dev->mode_config.mutex);
 
-       intel_runtime_pm_put(dev_priv);
+       intel_runtime_pm_put(dev_priv, wakeref);
 
        return 0;
 }
@@ -3283,23 +3117,24 @@ static int i915_engine_info(struct seq_file *m, void *unused)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_engine_cs *engine;
+       intel_wakeref_t wakeref;
        enum intel_engine_id id;
        struct drm_printer p;
 
-       intel_runtime_pm_get(dev_priv);
+       wakeref = intel_runtime_pm_get(dev_priv);
 
        seq_printf(m, "GT awake? %s (epoch %u)\n",
                   yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
        seq_printf(m, "Global active requests: %d\n",
                   dev_priv->gt.active_requests);
        seq_printf(m, "CS timestamp frequency: %u kHz\n",
-                  dev_priv->info.cs_timestamp_frequency_khz);
+                  RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
 
        p = drm_seq_file_printer(m);
        for_each_engine(engine, dev_priv, id)
                intel_engine_dump(engine, &p, "%s\n", engine->name);
 
-       intel_runtime_pm_put(dev_priv);
+       intel_runtime_pm_put(dev_priv, wakeref);
 
        return 0;
 }
@@ -3309,7 +3144,7 @@ static int i915_rcs_topology(struct seq_file *m, void *unused)
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_printer p = drm_seq_file_printer(m);
 
-       intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p);
+       intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
 
        return 0;
 }
@@ -3412,20 +3247,21 @@ static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
 {
        struct seq_file *m = file->private_data;
        struct drm_i915_private *dev_priv = m->private;
-       int ret;
+       intel_wakeref_t wakeref;
        bool enable;
+       int ret;
 
        ret = kstrtobool_from_user(ubuf, len, &enable);
        if (ret < 0)
                return ret;
 
-       intel_runtime_pm_get(dev_priv);
-       if (!dev_priv->ipc_enabled && enable)
-               DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
-       dev_priv->wm.distrust_bios_wm = true;
-       dev_priv->ipc_enabled = enable;
-       intel_enable_ipc(dev_priv);
-       intel_runtime_pm_put(dev_priv);
+       with_intel_runtime_pm(dev_priv, wakeref) {
+               if (!dev_priv->ipc_enabled && enable)
+                       DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
+               dev_priv->wm.distrust_bios_wm = true;
+               dev_priv->ipc_enabled = enable;
+               intel_enable_ipc(dev_priv);
+       }
 
        return len;
 }
@@ -3793,7 +3629,7 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
 }
 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
 
-static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
+static void wm_latency_show(struct seq_file *m, const u16 wm[8])
 {
        struct drm_i915_private *dev_priv = m->private;
        struct drm_device *dev = &dev_priv->drm;
@@ -3836,7 +3672,7 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
 static int pri_wm_latency_show(struct seq_file *m, void *data)
 {
        struct drm_i915_private *dev_priv = m->private;
-       const uint16_t *latencies;
+       const u16 *latencies;
 
        if (INTEL_GEN(dev_priv) >= 9)
                latencies = dev_priv->wm.skl_latency;
@@ -3851,7 +3687,7 @@ static int pri_wm_latency_show(struct seq_file *m, void *data)
 static int spr_wm_latency_show(struct seq_file *m, void *data)
 {
        struct drm_i915_private *dev_priv = m->private;
-       const uint16_t *latencies;
+       const u16 *latencies;
 
        if (INTEL_GEN(dev_priv) >= 9)
                latencies = dev_priv->wm.skl_latency;
@@ -3866,7 +3702,7 @@ static int spr_wm_latency_show(struct seq_file *m, void *data)
 static int cur_wm_latency_show(struct seq_file *m, void *data)
 {
        struct drm_i915_private *dev_priv = m->private;
-       const uint16_t *latencies;
+       const u16 *latencies;
 
        if (INTEL_GEN(dev_priv) >= 9)
                latencies = dev_priv->wm.skl_latency;
@@ -3892,7 +3728,7 @@ static int spr_wm_latency_open(struct inode *inode, struct file *file)
 {
        struct drm_i915_private *dev_priv = inode->i_private;
 
-       if (HAS_GMCH_DISPLAY(dev_priv))
+       if (HAS_GMCH(dev_priv))
                return -ENODEV;
 
        return single_open(file, spr_wm_latency_show, dev_priv);
@@ -3902,19 +3738,19 @@ static int cur_wm_latency_open(struct inode *inode, struct file *file)
 {
        struct drm_i915_private *dev_priv = inode->i_private;
 
-       if (HAS_GMCH_DISPLAY(dev_priv))
+       if (HAS_GMCH(dev_priv))
                return -ENODEV;
 
        return single_open(file, cur_wm_latency_show, dev_priv);
 }
 
 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
-                               size_t len, loff_t *offp, uint16_t wm[8])
+                               size_t len, loff_t *offp, u16 wm[8])
 {
        struct seq_file *m = file->private_data;
        struct drm_i915_private *dev_priv = m->private;
        struct drm_device *dev = &dev_priv->drm;
-       uint16_t new[8] = { 0 };
+       u16 new[8] = { 0 };
        int num_levels;
        int level;
        int ret;
@@ -3959,7 +3795,7 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
 {
        struct seq_file *m = file->private_data;
        struct drm_i915_private *dev_priv = m->private;
-       uint16_t *latencies;
+       u16 *latencies;
 
        if (INTEL_GEN(dev_priv) >= 9)
                latencies = dev_priv->wm.skl_latency;
@@ -3974,7 +3810,7 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
 {
        struct seq_file *m = file->private_data;
        struct drm_i915_private *dev_priv = m->private;
-       uint16_t *latencies;
+       u16 *latencies;
 
        if (INTEL_GEN(dev_priv) >= 9)
                latencies = dev_priv->wm.skl_latency;
@@ -3989,7 +3825,7 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
 {
        struct seq_file *m = file->private_data;
        struct drm_i915_private *dev_priv = m->private;
-       uint16_t *latencies;
+       u16 *latencies;
 
        if (INTEL_GEN(dev_priv) >= 9)
                latencies = dev_priv->wm.skl_latency;
@@ -4040,8 +3876,6 @@ static int
 i915_wedged_set(void *data, u64 val)
 {
        struct drm_i915_private *i915 = data;
-       struct intel_engine_cs *engine;
-       unsigned int tmp;
 
        /*
         * There is no safeguard against this debugfs entry colliding
@@ -4054,18 +3888,8 @@ i915_wedged_set(void *data, u64 val)
        if (i915_reset_backoff(&i915->gpu_error))
                return -EAGAIN;
 
-       for_each_engine_masked(engine, i915, val, tmp) {
-               engine->hangcheck.seqno = intel_engine_get_seqno(engine);
-               engine->hangcheck.stalled = true;
-       }
-
        i915_handle_error(i915, val, I915_ERROR_CAPTURE,
                          "Manually set wedged engine mask = %llx", val);
-
-       wait_on_bit(&i915->gpu_error.flags,
-                   I915_RESET_HANDOFF,
-                   TASK_UNINTERRUPTIBLE);
-
        return 0;
 }
 
@@ -4073,94 +3897,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
                        i915_wedged_get, i915_wedged_set,
                        "%llu\n");
 
-static int
-fault_irq_set(struct drm_i915_private *i915,
-             unsigned long *irq,
-             unsigned long val)
-{
-       int err;
-
-       err = mutex_lock_interruptible(&i915->drm.struct_mutex);
-       if (err)
-               return err;
-
-       err = i915_gem_wait_for_idle(i915,
-                                    I915_WAIT_LOCKED |
-                                    I915_WAIT_INTERRUPTIBLE,
-                                    MAX_SCHEDULE_TIMEOUT);
-       if (err)
-               goto err_unlock;
-
-       *irq = val;
-       mutex_unlock(&i915->drm.struct_mutex);
-
-       /* Flush idle worker to disarm irq */
-       drain_delayed_work(&i915->gt.idle_work);
-
-       return 0;
-
-err_unlock:
-       mutex_unlock(&i915->drm.struct_mutex);
-       return err;
-}
-
-static int
-i915_ring_missed_irq_get(void *data, u64 *val)
-{
-       struct drm_i915_private *dev_priv = data;
-
-       *val = dev_priv->gpu_error.missed_irq_rings;
-       return 0;
-}
-
-static int
-i915_ring_missed_irq_set(void *data, u64 val)
-{
-       struct drm_i915_private *i915 = data;
-
-       return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
-                       i915_ring_missed_irq_get, i915_ring_missed_irq_set,
-                       "0x%08llx\n");
-
-static int
-i915_ring_test_irq_get(void *data, u64 *val)
-{
-       struct drm_i915_private *dev_priv = data;
-
-       *val = dev_priv->gpu_error.test_irq_rings;
-
-       return 0;
-}
-
-static int
-i915_ring_test_irq_set(void *data, u64 val)
-{
-       struct drm_i915_private *i915 = data;
-
-       /* GuC keeps the user interrupt permanently enabled for submission */
-       if (USES_GUC_SUBMISSION(i915))
-               return -ENODEV;
-
-       /*
-        * From icl, we can no longer individually mask interrupt generation
-        * from each engine.
-        */
-       if (INTEL_GEN(i915) >= 11)
-               return -ENODEV;
-
-       val &= INTEL_INFO(i915)->ring_mask;
-       DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
-
-       return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
-                       i915_ring_test_irq_get, i915_ring_test_irq_set,
-                       "0x%08llx\n");
-
 #define DROP_UNBOUND   BIT(0)
 #define DROP_BOUND     BIT(1)
 #define DROP_RETIRE    BIT(2)
@@ -4191,13 +3927,15 @@ static int
 i915_drop_caches_set(void *data, u64 val)
 {
        struct drm_i915_private *i915 = data;
+       intel_wakeref_t wakeref;
        int ret = 0;
 
        DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
                  val, val & DROP_ALL);
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
-       if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915))
+       if (val & DROP_RESET_ACTIVE &&
+           wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT))
                i915_gem_set_wedged(i915);
 
        /* No need to check and wait for gpu resets, only libdrm auto-restarts
@@ -4213,22 +3951,14 @@ i915_drop_caches_set(void *data, u64 val)
                                                     I915_WAIT_LOCKED,
                                                     MAX_SCHEDULE_TIMEOUT);
 
-               if (ret == 0 && val & DROP_RESET_SEQNO)
-                       ret = i915_gem_set_global_seqno(&i915->drm, 1);
-
                if (val & DROP_RETIRE)
                        i915_retire_requests(i915);
 
                mutex_unlock(&i915->drm.struct_mutex);
        }
 
-       if (val & DROP_RESET_ACTIVE &&
-           i915_terminally_wedged(&i915->gpu_error)) {
+       if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(&i915->gpu_error))
                i915_handle_error(i915, ALL_ENGINES, 0, NULL);
-               wait_on_bit(&i915->gpu_error.flags,
-                           I915_RESET_HANDOFF,
-                           TASK_UNINTERRUPTIBLE);
-       }
 
        fs_reclaim_acquire(GFP_KERNEL);
        if (val & DROP_BOUND)
@@ -4253,7 +3983,7 @@ i915_drop_caches_set(void *data, u64 val)
                i915_gem_drain_freed_objects(i915);
 
 out:
-       intel_runtime_pm_put(i915);
+       intel_runtime_pm_put(i915, wakeref);
 
        return ret;
 }
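
DROP_RESET_ACTIVE above also changes from sampling engine idleness once to polling for it: i915's wait_for(COND, ms) helper re-evaluates the condition until it holds or the timeout expires, returning 0 on success and -ETIMEDOUT otherwise, so the GPU is only declared wedged after the engines fail to go idle within I915_IDLE_ENGINES_TIMEOUT. A hypothetical userspace equivalent of that poll loop:

#include <errno.h>
#include <stdbool.h>
#include <time.h>

static int wait_for_cond(bool (*cond)(void), unsigned int timeout_ms)
{
	struct timespec step = { .tv_nsec = 1000 * 1000 };	/* 1 ms */

	while (timeout_ms--) {
		if (cond())
			return 0;
		nanosleep(&step, NULL);
	}
	return cond() ? 0 : -ETIMEDOUT;	/* one last check at the deadline */
}
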
@@ -4266,16 +3996,14 @@ static int
 i915_cache_sharing_get(void *data, u64 *val)
 {
        struct drm_i915_private *dev_priv = data;
-       u32 snpcr;
+       intel_wakeref_t wakeref;
+       u32 snpcr = 0;
 
-       if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
+       if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
                return -ENODEV;
 
-       intel_runtime_pm_get(dev_priv);
-
-       snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
-
-       intel_runtime_pm_put(dev_priv);
+       with_intel_runtime_pm(dev_priv, wakeref)
+               snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
 
        *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
 
@@ -4286,24 +4014,25 @@ static int
 i915_cache_sharing_set(void *data, u64 val)
 {
        struct drm_i915_private *dev_priv = data;
-       u32 snpcr;
+       intel_wakeref_t wakeref;
 
-       if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
+       if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
                return -ENODEV;
 
        if (val > 3)
                return -EINVAL;
 
-       intel_runtime_pm_get(dev_priv);
        DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
+       with_intel_runtime_pm(dev_priv, wakeref) {
+               u32 snpcr;
+
+               /* Update the cache sharing policy here as well */
+               snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+               snpcr &= ~GEN6_MBC_SNPCR_MASK;
+               snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
+               I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+       }
 
-       /* Update the cache sharing policy here as well */
-       snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
-       snpcr &= ~GEN6_MBC_SNPCR_MASK;
-       snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
-       I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
-
-       intel_runtime_pm_put(dev_priv);
        return 0;
 }
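
For straight-line critical sections the explicit get/put pairs collapse into a with_intel_runtime_pm() block, as in the two cache-sharing hunks here. The macro body is not part of this diff; a plausible shape, offered purely as illustration, is a for-loop wrapper that runs its body exactly once between acquire and release:

    /* hypothetical definition; the real macro is not shown in this patch */
    #define with_intel_runtime_pm(i915, wf) \
            for ((wf) = intel_runtime_pm_get(i915); (wf); \
                 intel_runtime_pm_put((i915), (wf)), (wf) = 0)

Whatever the exact definition, the visible contract is that the caller declares the wakeref and the body runs with the device guaranteed awake.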
 
@@ -4348,7 +4077,7 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
                                     struct sseu_dev_info *sseu)
 {
 #define SS_MAX 6
-       const struct intel_device_info *info = INTEL_INFO(dev_priv);
+       const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
        u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
        int s, ss;
 
@@ -4404,7 +4133,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
                                    struct sseu_dev_info *sseu)
 {
 #define SS_MAX 3
-       const struct intel_device_info *info = INTEL_INFO(dev_priv);
+       const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
        u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
        int s, ss;
 
@@ -4432,7 +4161,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
 
                if (IS_GEN9_BC(dev_priv))
                        sseu->subslice_mask[s] =
-                               INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
+                               RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
 
                for (ss = 0; ss < info->sseu.max_subslices; ss++) {
                        unsigned int eu_cnt;
@@ -4466,10 +4195,10 @@ static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
 
        if (sseu->slice_mask) {
                sseu->eu_per_subslice =
-                               INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
+                       RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
                for (s = 0; s < fls(sseu->slice_mask); s++) {
                        sseu->subslice_mask[s] =
-                               INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
+                               RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
                }
                sseu->eu_total = sseu->eu_per_subslice *
                                 sseu_subslice_total(sseu);
@@ -4477,7 +4206,7 @@ static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
                /* subtract fused off EU(s) from enabled slice(s) */
                for (s = 0; s < fls(sseu->slice_mask); s++) {
                        u8 subslice_7eu =
-                               INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];
+                               RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];
 
                        sseu->eu_total -= hweight8(subslice_7eu);
                }
@@ -4525,34 +4254,32 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct sseu_dev_info sseu;
+       intel_wakeref_t wakeref;
 
        if (INTEL_GEN(dev_priv) < 8)
                return -ENODEV;
 
        seq_puts(m, "SSEU Device Info\n");
-       i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);
+       i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);
 
        seq_puts(m, "SSEU Device Status\n");
        memset(&sseu, 0, sizeof(sseu));
-       sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices;
-       sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices;
+       sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
+       sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
        sseu.max_eus_per_subslice =
-               INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice;
-
-       intel_runtime_pm_get(dev_priv);
-
-       if (IS_CHERRYVIEW(dev_priv)) {
-               cherryview_sseu_device_status(dev_priv, &sseu);
-       } else if (IS_BROADWELL(dev_priv)) {
-               broadwell_sseu_device_status(dev_priv, &sseu);
-       } else if (IS_GEN9(dev_priv)) {
-               gen9_sseu_device_status(dev_priv, &sseu);
-       } else if (INTEL_GEN(dev_priv) >= 10) {
-               gen10_sseu_device_status(dev_priv, &sseu);
+               RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;
+
+       with_intel_runtime_pm(dev_priv, wakeref) {
+               if (IS_CHERRYVIEW(dev_priv))
+                       cherryview_sseu_device_status(dev_priv, &sseu);
+               else if (IS_BROADWELL(dev_priv))
+                       broadwell_sseu_device_status(dev_priv, &sseu);
+               else if (IS_GEN(dev_priv, 9))
+                       gen9_sseu_device_status(dev_priv, &sseu);
+               else if (INTEL_GEN(dev_priv) >= 10)
+                       gen10_sseu_device_status(dev_priv, &sseu);
        }
 
-       intel_runtime_pm_put(dev_priv);
-
        i915_print_sseu_info(m, false, &sseu);
 
        return 0;
@@ -4565,7 +4292,7 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
        if (INTEL_GEN(i915) < 6)
                return 0;
 
-       intel_runtime_pm_get(i915);
+       file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915);
        intel_uncore_forcewake_user_get(i915);
 
        return 0;
@@ -4579,7 +4306,8 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
                return 0;
 
        intel_uncore_forcewake_user_put(i915);
-       intel_runtime_pm_put(i915);
+       intel_runtime_pm_put(i915,
+                            (intel_wakeref_t)(uintptr_t)file->private_data);
 
        return 0;
 }
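
Here the cookie must survive from open() to release(), so it is stashed in file->private_data. That works because intel_wakeref_t is an integer handle (a depot_stack_handle_t, per the i915_drv.h hunk further down) narrow enough to round-trip through a pointer:

    /* stash at open ... */
    file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915);

    /* ... recover at release; assumes the handle fits in a uintptr_t */
    intel_runtime_pm_put(i915,
                         (intel_wakeref_t)(uintptr_t)file->private_data);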
@@ -4906,7 +4634,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
        {"i915_context_status", i915_context_status, 0},
        {"i915_forcewake_domains", i915_forcewake_domains, 0},
        {"i915_swizzle_info", i915_swizzle_info, 0},
-       {"i915_ppgtt_info", i915_ppgtt_info, 0},
        {"i915_llc", i915_llc, 0},
        {"i915_edp_psr_status", i915_edp_psr_status, 0},
        {"i915_energy_uJ", i915_energy_uJ, 0},
@@ -4933,15 +4660,12 @@ static const struct i915_debugfs_files {
 } i915_debugfs_files[] = {
        {"i915_wedged", &i915_wedged_fops},
        {"i915_cache_sharing", &i915_cache_sharing_fops},
-       {"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
-       {"i915_ring_test_irq", &i915_ring_test_irq_fops},
        {"i915_gem_drop_caches", &i915_drop_caches_fops},
 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
        {"i915_error_state", &i915_error_state_fops},
        {"i915_gpu_info", &i915_gpu_info_fops},
 #endif
        {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
-       {"i915_next_seqno", &i915_next_seqno_fops},
        {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
        {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
        {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
@@ -5014,7 +4738,7 @@ static int i915_dpcd_show(struct seq_file *m, void *data)
        struct drm_connector *connector = m->private;
        struct intel_dp *intel_dp =
                enc_to_intel_dp(&intel_attached_encoder(connector)->base);
-       uint8_t buf[16];
+       u8 buf[16];
        ssize_t err;
        int i;
 
@@ -5088,6 +4812,105 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
 }
 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
 
+static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
+{
+       struct drm_connector *connector = m->private;
+       struct drm_device *dev = connector->dev;
+       struct drm_crtc *crtc;
+       struct intel_dp *intel_dp;
+       struct drm_modeset_acquire_ctx ctx;
+       struct intel_crtc_state *crtc_state = NULL;
+       int ret = 0;
+       bool try_again = false;
+
+       drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
+
+       do {
+               try_again = false;
+               ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
+                                      &ctx);
+               if (ret) {
+                       ret = -EINTR;
+                       break;
+               }
+               crtc = connector->state->crtc;
+               if (connector->status != connector_status_connected || !crtc) {
+                       ret = -ENODEV;
+                       break;
+               }
+               ret = drm_modeset_lock(&crtc->mutex, &ctx);
+               if (ret == -EDEADLK) {
+                       ret = drm_modeset_backoff(&ctx);
+                       if (!ret) {
+                               try_again = true;
+                               continue;
+                       }
+                       break;
+               } else if (ret) {
+                       break;
+               }
+               intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+               crtc_state = to_intel_crtc_state(crtc->state);
+               seq_printf(m, "DSC_Enabled: %s\n",
+                          yesno(crtc_state->dsc_params.compression_enable));
+               seq_printf(m, "DSC_Sink_Support: %s\n",
+                          yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
+               if (!intel_dp_is_edp(intel_dp))
+                       seq_printf(m, "FEC_Sink_Support: %s\n",
+                                  yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
+       } while (try_again);
+
+       drm_modeset_drop_locks(&ctx);
+       drm_modeset_acquire_fini(&ctx);
+
+       return ret;
+}
+
+static ssize_t i915_dsc_fec_support_write(struct file *file,
+                                         const char __user *ubuf,
+                                         size_t len, loff_t *offp)
+{
+       bool dsc_enable = false;
+       int ret;
+       struct drm_connector *connector =
+               ((struct seq_file *)file->private_data)->private;
+       struct intel_encoder *encoder = intel_attached_encoder(connector);
+       struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+       if (len == 0)
+               return 0;
+
+       DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
+                        len);
+
+       ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
+       if (ret < 0)
+               return ret;
+
+       DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
+                        (dsc_enable) ? "true" : "false");
+       intel_dp->force_dsc_en = dsc_enable;
+
+       *offp += len;
+       return len;
+}
+
+static int i915_dsc_fec_support_open(struct inode *inode,
+                                    struct file *file)
+{
+       return single_open(file, i915_dsc_fec_support_show,
+                          inode->i_private);
+}
+
+static const struct file_operations i915_dsc_fec_support_fops = {
+       .owner = THIS_MODULE,
+       .open = i915_dsc_fec_support_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+       .write = i915_dsc_fec_support_write
+};
+
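
The new i915_dsc_fec_support_show() above follows the standard drm_modeset_acquire_ctx retry dance: take the connection and CRTC locks, and when drm_modeset_lock() reports -EDEADLK, drop everything with drm_modeset_backoff() and start over. Condensed to its skeleton (a sketch; the DSC/FEC state reads are elided):

    struct drm_modeset_acquire_ctx ctx;
    int ret;

    drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
    retry:
    ret = drm_modeset_lock(&crtc->mutex, &ctx);
    if (ret == -EDEADLK && !drm_modeset_backoff(&ctx))
            goto retry;             /* all locks dropped, safe to retry */
    if (!ret) {
            /* ... read crtc->state under the lock ... */
    }
    drm_modeset_drop_locks(&ctx);
    drm_modeset_acquire_fini(&ctx);

On the write side, kstrtobool_from_user() accepts the usual 0/1/n/y spellings, so something like "echo 1 > i915_dsc_fec_support" in the connector's debugfs directory should be enough to set force_dsc_en.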
 /**
  * i915_debugfs_connector_add - add i915 specific connector debugfs files
  * @connector: pointer to a registered drm_connector
@@ -5100,6 +4923,7 @@ DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
 int i915_debugfs_connector_add(struct drm_connector *connector)
 {
        struct dentry *root = connector->debugfs_entry;
+       struct drm_i915_private *dev_priv = to_i915(connector->dev);
 
        /* The connector must have been registered beforehand. */
        if (!root)
@@ -5124,5 +4948,11 @@ int i915_debugfs_connector_add(struct drm_connector *connector)
                                    connector, &i915_hdcp_sink_capability_fops);
        }
 
+       if (INTEL_GEN(dev_priv) >= 10 &&
+           (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+            connector->connector_type == DRM_MODE_CONNECTOR_eDP))
+               debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
+                                   connector, &i915_dsc_fec_support_fops);
+
        return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index b310a897a4adab444349252503fba2e92134814f..7de90701f6f175e8e7f5fc777f9305dc7b628494 100644
 #include <linux/vt.h>
 #include <acpi/video.h>
 
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/i915_drm.h>
 
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "i915_pmu.h"
+#include "i915_reset.h"
 #include "i915_query.h"
 #include "i915_vgpu.h"
 #include "intel_drv.h"
@@ -132,15 +132,15 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
        switch (id) {
        case INTEL_PCH_IBX_DEVICE_ID_TYPE:
                DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
-               WARN_ON(!IS_GEN5(dev_priv));
+               WARN_ON(!IS_GEN(dev_priv, 5));
                return PCH_IBX;
        case INTEL_PCH_CPT_DEVICE_ID_TYPE:
                DRM_DEBUG_KMS("Found CougarPoint PCH\n");
-               WARN_ON(!IS_GEN6(dev_priv) && !IS_IVYBRIDGE(dev_priv));
+               WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
                return PCH_CPT;
        case INTEL_PCH_PPT_DEVICE_ID_TYPE:
                DRM_DEBUG_KMS("Found PantherPoint PCH\n");
-               WARN_ON(!IS_GEN6(dev_priv) && !IS_IVYBRIDGE(dev_priv));
+               WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
                /* PantherPoint is CPT compatible */
                return PCH_CPT;
        case INTEL_PCH_LPT_DEVICE_ID_TYPE:
@@ -217,9 +217,9 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
         * make an educated guess as to which PCH is really there.
         */
 
-       if (IS_GEN5(dev_priv))
+       if (IS_GEN(dev_priv, 5))
                id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
-       else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
+       else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
                id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
        else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
                id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
@@ -349,7 +349,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
                value = min_t(int, INTEL_PPGTT(dev_priv), I915_GEM_PPGTT_FULL);
                break;
        case I915_PARAM_HAS_SEMAPHORES:
-               value = HAS_LEGACY_SEMAPHORES(dev_priv);
+               value = 0;
                break;
        case I915_PARAM_HAS_SECURE_BATCHES:
                value = capable(CAP_SYS_ADMIN);
@@ -358,12 +358,12 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
                value = i915_cmd_parser_get_version(dev_priv);
                break;
        case I915_PARAM_SUBSLICE_TOTAL:
-               value = sseu_subslice_total(&INTEL_INFO(dev_priv)->sseu);
+               value = sseu_subslice_total(&RUNTIME_INFO(dev_priv)->sseu);
                if (!value)
                        return -ENODEV;
                break;
        case I915_PARAM_EU_TOTAL:
-               value = INTEL_INFO(dev_priv)->sseu.eu_total;
+               value = RUNTIME_INFO(dev_priv)->sseu.eu_total;
                if (!value)
                        return -ENODEV;
                break;
@@ -380,7 +380,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
                value = HAS_POOLED_EU(dev_priv);
                break;
        case I915_PARAM_MIN_EU_IN_POOL:
-               value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
+               value = RUNTIME_INFO(dev_priv)->sseu.min_eu_in_pool;
                break;
        case I915_PARAM_HUC_STATUS:
                value = intel_huc_check_status(&dev_priv->huc);
@@ -430,17 +430,17 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
                value = intel_engines_has_context_isolation(dev_priv);
                break;
        case I915_PARAM_SLICE_MASK:
-               value = INTEL_INFO(dev_priv)->sseu.slice_mask;
+               value = RUNTIME_INFO(dev_priv)->sseu.slice_mask;
                if (!value)
                        return -ENODEV;
                break;
        case I915_PARAM_SUBSLICE_MASK:
-               value = INTEL_INFO(dev_priv)->sseu.subslice_mask[0];
+               value = RUNTIME_INFO(dev_priv)->sseu.subslice_mask[0];
                if (!value)
                        return -ENODEV;
                break;
        case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
-               value = 1000 * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz;
+               value = 1000 * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz;
                break;
        case I915_PARAM_MMAP_GTT_COHERENT:
                value = INTEL_INFO(dev_priv)->has_coherent_ggtt;
@@ -906,6 +906,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv)
        mutex_init(&dev_priv->pps_mutex);
 
        i915_memcpy_init_early(dev_priv);
+       intel_runtime_pm_init_early(dev_priv);
 
        ret = i915_workqueues_init(dev_priv);
        if (ret < 0)
@@ -966,7 +967,7 @@ static int i915_mmio_setup(struct drm_i915_private *dev_priv)
        int mmio_bar;
        int mmio_size;
 
-       mmio_bar = IS_GEN2(dev_priv) ? 1 : 0;
+       mmio_bar = IS_GEN(dev_priv, 2) ? 1 : 0;
        /*
         * Before gen4, the registers and the GTT are behind different BARs.
         * However, from gen4 onwards, the registers and the GTT are shared
@@ -1341,7 +1342,7 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
        /* Need to calculate bandwidth only for Gen9 */
        if (IS_BROXTON(dev_priv))
                ret = bxt_get_dram_info(dev_priv);
-       else if (IS_GEN9(dev_priv))
+       else if (IS_GEN(dev_priv, 9))
                ret = skl_get_dram_info(dev_priv);
        else
                ret = skl_dram_get_channels_info(dev_priv);
@@ -1374,7 +1375,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
        if (i915_inject_load_failure())
                return -ENODEV;
 
-       intel_device_info_runtime_init(mkwrite_device_info(dev_priv));
+       intel_device_info_runtime_init(dev_priv);
 
        if (HAS_PPGTT(dev_priv)) {
                if (intel_vgpu_active(dev_priv) &&
@@ -1436,7 +1437,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
        pci_set_master(pdev);
 
        /* overlay on gen2 is broken and can't address above 1G */
-       if (IS_GEN2(dev_priv)) {
+       if (IS_GEN(dev_priv, 2)) {
                ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
                if (ret) {
                        DRM_ERROR("failed to set DMA mask\n");
@@ -1574,7 +1575,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
                acpi_video_register();
        }
 
-       if (IS_GEN5(dev_priv))
+       if (IS_GEN(dev_priv, 5))
                intel_gpu_ips_init(dev_priv);
 
        intel_audio_init(dev_priv);
@@ -1636,8 +1637,14 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
        if (drm_debug & DRM_UT_DRIVER) {
                struct drm_printer p = drm_debug_printer("i915 device info:");
 
-               intel_device_info_dump(&dev_priv->info, &p);
-               intel_device_info_dump_runtime(&dev_priv->info, &p);
+               drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s gen=%i\n",
+                          INTEL_DEVID(dev_priv),
+                          INTEL_REVID(dev_priv),
+                          intel_platform_name(INTEL_INFO(dev_priv)->platform),
+                          INTEL_GEN(dev_priv));
+
+               intel_device_info_dump_flags(INTEL_INFO(dev_priv), &p);
+               intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
        }
 
        if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
@@ -1674,7 +1681,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* Setup the write-once "constant" device info */
        device_info = mkwrite_device_info(i915);
        memcpy(device_info, match_info, sizeof(*device_info));
-       device_info->device_id = pdev->device;
+       RUNTIME_INFO(i915)->device_id = pdev->device;
 
        BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
                     BITS_PER_TYPE(device_info->platform_mask));
@@ -1774,6 +1781,9 @@ void i915_driver_unload(struct drm_device *dev)
 
        i915_driver_unregister(dev_priv);
 
+       /* Flush any external code that still may be under the RCU lock */
+       synchronize_rcu();
+
        if (i915_gem_suspend(dev_priv))
                DRM_ERROR("failed to idle hardware; continuing to unload!\n");
 
@@ -1802,8 +1812,7 @@ void i915_driver_unload(struct drm_device *dev)
        i915_driver_cleanup_mmio(dev_priv);
 
        enable_rpm_wakeref_asserts(dev_priv);
-
-       WARN_ON(atomic_read(&dev_priv->runtime_pm.wakeref_count));
+       intel_runtime_pm_cleanup(dev_priv);
 }
 
 static void i915_driver_release(struct drm_device *dev)
@@ -2005,6 +2014,8 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 
 out:
        enable_rpm_wakeref_asserts(dev_priv);
+       if (!dev_priv->uncore.user_forcewake.count)
+               intel_runtime_pm_cleanup(dev_priv);
 
        return ret;
 }
@@ -2174,7 +2185,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
 
        intel_power_domains_resume(dev_priv);
 
-       intel_engines_sanitize(dev_priv);
+       intel_engines_sanitize(dev_priv, true);
 
        enable_rpm_wakeref_asserts(dev_priv);
 
@@ -2195,210 +2206,6 @@ static int i915_resume_switcheroo(struct drm_device *dev)
        return i915_drm_resume(dev);
 }
 
-/**
- * i915_reset - reset chip after a hang
- * @i915: #drm_i915_private to reset
- * @stalled_mask: mask of the stalled engines with the guilty requests
- * @reason: user error message for why we are resetting
- *
- * Reset the chip.  Useful if a hang is detected. Marks the device as wedged
- * on failure.
- *
- * Caller must hold the struct_mutex.
- *
- * Procedure is fairly simple:
- *   - reset the chip using the reset reg
- *   - re-init context state
- *   - re-init hardware status page
- *   - re-init ring buffer
- *   - re-init interrupt state
- *   - re-init display
- */
-void i915_reset(struct drm_i915_private *i915,
-               unsigned int stalled_mask,
-               const char *reason)
-{
-       struct i915_gpu_error *error = &i915->gpu_error;
-       int ret;
-       int i;
-
-       GEM_TRACE("flags=%lx\n", error->flags);
-
-       might_sleep();
-       lockdep_assert_held(&i915->drm.struct_mutex);
-       GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
-
-       if (!test_bit(I915_RESET_HANDOFF, &error->flags))
-               return;
-
-       /* Clear any previous failed attempts at recovery. Time to try again. */
-       if (!i915_gem_unset_wedged(i915))
-               goto wakeup;
-
-       if (reason)
-               dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
-       error->reset_count++;
-
-       ret = i915_gem_reset_prepare(i915);
-       if (ret) {
-               dev_err(i915->drm.dev, "GPU recovery failed\n");
-               goto taint;
-       }
-
-       if (!intel_has_gpu_reset(i915)) {
-               if (i915_modparams.reset)
-                       dev_err(i915->drm.dev, "GPU reset not supported\n");
-               else
-                       DRM_DEBUG_DRIVER("GPU reset disabled\n");
-               goto error;
-       }
-
-       for (i = 0; i < 3; i++) {
-               ret = intel_gpu_reset(i915, ALL_ENGINES);
-               if (ret == 0)
-                       break;
-
-               msleep(100);
-       }
-       if (ret) {
-               dev_err(i915->drm.dev, "Failed to reset chip\n");
-               goto taint;
-       }
-
-       /* Ok, now get things going again... */
-
-       /*
-        * Everything depends on having the GTT running, so we need to start
-        * there.
-        */
-       ret = i915_ggtt_enable_hw(i915);
-       if (ret) {
-               DRM_ERROR("Failed to re-enable GGTT following reset (%d)\n",
-                         ret);
-               goto error;
-       }
-
-       i915_gem_reset(i915, stalled_mask);
-       intel_overlay_reset(i915);
-
-       /*
-        * Next we need to restore the context, but we don't use those
-        * yet either...
-        *
-        * Ring buffer needs to be re-initialized in the KMS case, or if X
-        * was running at the time of the reset (i.e. we weren't VT
-        * switched away).
-        */
-       ret = i915_gem_init_hw(i915);
-       if (ret) {
-               DRM_ERROR("Failed to initialise HW following reset (%d)\n",
-                         ret);
-               goto error;
-       }
-
-       i915_queue_hangcheck(i915);
-
-finish:
-       i915_gem_reset_finish(i915);
-wakeup:
-       clear_bit(I915_RESET_HANDOFF, &error->flags);
-       wake_up_bit(&error->flags, I915_RESET_HANDOFF);
-       return;
-
-taint:
-       /*
-        * History tells us that if we cannot reset the GPU now, we
-        * never will. This then impacts everything that is run
-        * subsequently. On failing the reset, we mark the driver
-        * as wedged, preventing further execution on the GPU.
-        * We also want to go one step further and add a taint to the
-        * kernel so that any subsequent faults can be traced back to
-        * this failure. This is important for CI, where if the
-        * GPU/driver fails we would like to reboot and restart testing
-        * rather than continue on into oblivion. For everyone else,
-        * the system should still plod along, but they have been warned!
-        */
-       add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
-error:
-       i915_gem_set_wedged(i915);
-       i915_retire_requests(i915);
-       goto finish;
-}
-
-static inline int intel_gt_reset_engine(struct drm_i915_private *dev_priv,
-                                       struct intel_engine_cs *engine)
-{
-       return intel_gpu_reset(dev_priv, intel_engine_flag(engine));
-}
-
-/**
- * i915_reset_engine - reset GPU engine to recover from a hang
- * @engine: engine to reset
- * @msg: reason for GPU reset; or NULL for no dev_notice()
- *
- * Reset a specific GPU engine. Useful if a hang is detected.
- * Returns zero on successful reset or otherwise an error code.
- *
- * Procedure is:
- *  - identifies the request that caused the hang and it is dropped
- *  - reset engine (which will force the engine to idle)
- *  - re-init/configure engine
- */
-int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
-{
-       struct i915_gpu_error *error = &engine->i915->gpu_error;
-       struct i915_request *active_request;
-       int ret;
-
-       GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
-       GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
-
-       active_request = i915_gem_reset_prepare_engine(engine);
-       if (IS_ERR_OR_NULL(active_request)) {
-               /* Either the previous reset failed, or we pardon the reset. */
-               ret = PTR_ERR(active_request);
-               goto out;
-       }
-
-       if (msg)
-               dev_notice(engine->i915->drm.dev,
-                          "Resetting %s for %s\n", engine->name, msg);
-       error->reset_engine_count[engine->id]++;
-
-       if (!engine->i915->guc.execbuf_client)
-               ret = intel_gt_reset_engine(engine->i915, engine);
-       else
-               ret = intel_guc_reset_engine(&engine->i915->guc, engine);
-       if (ret) {
-               /* If we fail here, we expect to fallback to a global reset */
-               DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
-                                engine->i915->guc.execbuf_client ? "GuC " : "",
-                                engine->name, ret);
-               goto out;
-       }
-
-       /*
-        * The request that caused the hang is stuck on elsp, we know the
-        * active request and can drop it, adjust head to skip the offending
-        * request to resume executing remaining requests in the queue.
-        */
-       i915_gem_reset_engine(engine, active_request, true);
-
-       /*
-        * The engine and its registers (and workarounds in case of render)
-        * have been reset to their default values. Follow the init_ring
-        * process to program RING_MODE, HWSP and re-enable submission.
-        */
-       ret = engine->init_hw(engine);
-       if (ret)
-               goto out;
-
-out:
-       intel_engine_cancel_stop_cs(engine);
-       i915_gem_reset_finish_engine(engine);
-       return ret;
-}
-
 static int i915_pm_prepare(struct device *kdev)
 {
        struct pci_dev *pdev = to_pci_dev(kdev);
@@ -2736,6 +2543,10 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
 static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv,
                                  u32 mask, u32 val)
 {
+       i915_reg_t reg = VLV_GTLC_PW_STATUS;
+       u32 reg_value;
+       int ret;
+
        /* The HW does not like us polling for PW_STATUS frequently, so
         * use the sleeping loop rather than risk the busy spin within
         * intel_wait_for_register().
@@ -2743,8 +2554,12 @@ static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv,
         * Transitioning between RC6 states should be at most 2ms (see
         * valleyview_enable_rps) so use a 3ms timeout.
         */
-       return wait_for((I915_READ_NOTRACE(VLV_GTLC_PW_STATUS) & mask) == val,
-                       3);
+       ret = wait_for(((reg_value = I915_READ_NOTRACE(reg)) & mask) == val, 3);
+
+       /* just trace the final value */
+       trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);
+
+       return ret;
 }
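
The rewrite keeps I915_READ_NOTRACE() inside the polling loop, so the i915_reg_rw tracepoint is not flooded with one event per poll; instead the C assignment-expression in the wait_for() condition leaves the last sampled value in reg_value, which is then traced exactly once:

    u32 reg_value;

    /* every poll updates reg_value; only the final sample is reported */
    ret = wait_for(((reg_value = I915_READ_NOTRACE(reg)) & mask) == val, 3);
    trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);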
 
 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
@@ -2959,7 +2774,7 @@ static int intel_runtime_suspend(struct device *kdev)
        }
 
        enable_rpm_wakeref_asserts(dev_priv);
-       WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count));
+       intel_runtime_pm_cleanup(dev_priv);
 
        if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
                DRM_ERROR("Unclaimed access detected prior to suspending\n");
@@ -3203,7 +3018,7 @@ static struct drm_driver driver = {
         * deal with them for Intel hardware.
         */
        .driver_features =
-           DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
+           DRIVER_GEM | DRIVER_PRIME |
            DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ,
        .release = i915_driver_release,
        .open = i915_driver_open,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b1c31967194b92cc47b8a3cedafea021d82fd073..9adc7bb9e69ccfec96e468f95435b83e084ffcce 100644
@@ -45,8 +45,8 @@
 #include <linux/pm_qos.h>
 #include <linux/reservation.h>
 #include <linux/shmem_fs.h>
+#include <linux/stackdepot.h>
 
-#include <drm/drmP.h>
 #include <drm/intel-gtt.h>
 #include <drm/drm_legacy.h> /* for struct drm_dma_handle */
 #include <drm/drm_gem.h>
@@ -54,6 +54,7 @@
 #include <drm/drm_cache.h>
 #include <drm/drm_util.h>
 #include <drm/drm_dsc.h>
+#include <drm/drm_connector.h>
 
 #include "i915_fixed.h"
 #include "i915_params.h"
@@ -90,8 +91,8 @@
 
 #define DRIVER_NAME            "i915"
 #define DRIVER_DESC            "Intel Graphics"
-#define DRIVER_DATE            "20181204"
-#define DRIVER_TIMESTAMP       1543944377
+#define DRIVER_DATE            "20190207"
+#define DRIVER_TIMESTAMP       1549572331
 
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -130,6 +131,8 @@ bool i915_error_injected(void);
        __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
                      fmt, ##__VA_ARGS__)
 
+typedef depot_stack_handle_t intel_wakeref_t;
+
 enum hpd_pin {
        HPD_NONE = 0,
        HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
@@ -281,16 +284,14 @@ struct drm_i915_display_funcs {
        int (*get_fifo_size)(struct drm_i915_private *dev_priv,
                             enum i9xx_plane_id i9xx_plane);
        int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
-       int (*compute_intermediate_wm)(struct drm_device *dev,
-                                      struct intel_crtc *intel_crtc,
-                                      struct intel_crtc_state *newstate);
+       int (*compute_intermediate_wm)(struct intel_crtc_state *newstate);
        void (*initial_watermarks)(struct intel_atomic_state *state,
                                   struct intel_crtc_state *cstate);
        void (*atomic_update_watermarks)(struct intel_atomic_state *state,
                                         struct intel_crtc_state *cstate);
        void (*optimize_watermarks)(struct intel_atomic_state *state,
                                    struct intel_crtc_state *cstate);
-       int (*compute_global_watermarks)(struct drm_atomic_state *state);
+       int (*compute_global_watermarks)(struct intel_atomic_state *state);
        void (*update_wm)(struct intel_crtc *crtc);
        int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
        /* Returns the active state of the crtc, and if the crtc is active,
@@ -322,8 +323,20 @@ struct drm_i915_display_funcs {
        /* display clock increase/decrease */
        /* pll clock increase/decrease */
 
-       void (*load_csc_matrix)(struct drm_crtc_state *crtc_state);
-       void (*load_luts)(struct drm_crtc_state *crtc_state);
+       /*
+        * Program double buffered color management registers during
+        * vblank evasion. The registers should then latch during the
+        * next vblank start, alongside any other double buffered registers
+        * involved with the same commit.
+        */
+       void (*color_commit)(const struct intel_crtc_state *crtc_state);
+       /*
+        * Load LUTs (and other single buffered color management
+        * registers). Will (hopefully) be called during the vblank
+        * following the latching of any double buffered registers
+        * involved with the same commit.
+        */
+       void (*load_luts)(const struct intel_crtc_state *crtc_state);
 };
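
The two new comments define an ordering contract for the color-management hooks. As a call-sequence sketch (illustrative; the helper names are assumed from the driver's vblank-evasion code and are not part of this hunk):

    intel_pipe_update_start(new_crtc_state);  /* vblank evasion window opens */
    funcs->color_commit(new_crtc_state);      /* arm double buffered registers */
    intel_pipe_update_end(new_crtc_state);    /* registers latch at vblank start */
    funcs->load_luts(new_crtc_state);         /* single buffered LUTs, next vblank */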
 
 #define CSR_VERSION(major, minor)      ((major) << 16 | (minor))
@@ -333,16 +346,17 @@ struct drm_i915_display_funcs {
 struct intel_csr {
        struct work_struct work;
        const char *fw_path;
-       uint32_t required_version;
-       uint32_t max_fw_size; /* bytes */
-       uint32_t *dmc_payload;
-       uint32_t dmc_fw_size; /* dwords */
-       uint32_t version;
-       uint32_t mmio_count;
+       u32 required_version;
+       u32 max_fw_size; /* bytes */
+       u32 *dmc_payload;
+       u32 dmc_fw_size; /* dwords */
+       u32 version;
+       u32 mmio_count;
        i915_reg_t mmioaddr[8];
-       uint32_t mmiodata[8];
-       uint32_t dc_state;
-       uint32_t allowed_dc_mask;
+       u32 mmiodata[8];
+       u32 dc_state;
+       u32 allowed_dc_mask;
+       intel_wakeref_t wakeref;
 };
 
 enum i915_cache_level {
@@ -398,7 +412,7 @@ struct intel_fbc {
 
                struct {
                        unsigned int mode_flags;
-                       uint32_t hsw_bdw_pixel_rate;
+                       u32 hsw_bdw_pixel_rate;
                } crtc;
 
                struct {
@@ -417,7 +431,7 @@ struct intel_fbc {
 
                        int y;
 
-                       uint16_t pixel_blend_mode;
+                       u16 pixel_blend_mode;
                } plane;
 
                struct {
@@ -509,6 +523,7 @@ struct i915_psr {
        ktime_t last_exit;
        bool sink_not_reliable;
        bool irq_aux_error;
+       u16 su_x_granularity;
 };
 
 enum intel_pch {
@@ -556,7 +571,7 @@ struct i915_suspend_saved_registers {
        u32 saveSWF0[16];
        u32 saveSWF1[16];
        u32 saveSWF3[3];
-       uint64_t saveFENCE[I915_MAX_NUM_FENCES];
+       u64 saveFENCE[I915_MAX_NUM_FENCES];
        u32 savePCH_PORT_HOTPLUG;
        u16 saveGCDGMBUS;
 };
@@ -819,6 +834,8 @@ struct i915_power_domains {
        bool display_core_suspended;
        int power_well_count;
 
+       intel_wakeref_t wakeref;
+
        struct mutex lock;
        int domain_use_count[POWER_DOMAIN_NUM];
        struct i915_power_well *power_wells;
@@ -901,9 +918,9 @@ struct i915_gem_mm {
        atomic_t bsd_engine_dispatch_index;
 
        /** Bit 6 swizzling required for X tiling */
-       uint32_t bit_6_swizzle_x;
+       u32 bit_6_swizzle_x;
        /** Bit 6 swizzling required for Y tiling */
-       uint32_t bit_6_swizzle_y;
+       u32 bit_6_swizzle_y;
 
        /* accounting, useful for userland debugging */
        spinlock_t object_stat_lock;
@@ -930,18 +947,20 @@ struct ddi_vbt_port_info {
         * populate this field.
         */
 #define HDMI_LEVEL_SHIFT_UNKNOWN       0xff
-       uint8_t hdmi_level_shift;
+       u8 hdmi_level_shift;
 
-       uint8_t supports_dvi:1;
-       uint8_t supports_hdmi:1;
-       uint8_t supports_dp:1;
-       uint8_t supports_edp:1;
+       u8 supports_dvi:1;
+       u8 supports_hdmi:1;
+       u8 supports_dp:1;
+       u8 supports_edp:1;
+       u8 supports_typec_usb:1;
+       u8 supports_tbt:1;
 
-       uint8_t alternate_aux_channel;
-       uint8_t alternate_ddc_pin;
+       u8 alternate_aux_channel;
+       u8 alternate_ddc_pin;
 
-       uint8_t dp_boost_level;
-       uint8_t hdmi_boost_level;
+       u8 dp_boost_level;
+       u8 hdmi_boost_level;
        int dp_max_link_rate;           /* 0 for not limited by VBT */
 };
 
@@ -1032,41 +1051,41 @@ enum intel_ddb_partitioning {
 
 struct intel_wm_level {
        bool enable;
-       uint32_t pri_val;
-       uint32_t spr_val;
-       uint32_t cur_val;
-       uint32_t fbc_val;
+       u32 pri_val;
+       u32 spr_val;
+       u32 cur_val;
+       u32 fbc_val;
 };
 
 struct ilk_wm_values {
-       uint32_t wm_pipe[3];
-       uint32_t wm_lp[3];
-       uint32_t wm_lp_spr[3];
-       uint32_t wm_linetime[3];
+       u32 wm_pipe[3];
+       u32 wm_lp[3];
+       u32 wm_lp_spr[3];
+       u32 wm_linetime[3];
        bool enable_fbc_wm;
        enum intel_ddb_partitioning partitioning;
 };
 
 struct g4x_pipe_wm {
-       uint16_t plane[I915_MAX_PLANES];
-       uint16_t fbc;
+       u16 plane[I915_MAX_PLANES];
+       u16 fbc;
 };
 
 struct g4x_sr_wm {
-       uint16_t plane;
-       uint16_t cursor;
-       uint16_t fbc;
+       u16 plane;
+       u16 cursor;
+       u16 fbc;
 };
 
 struct vlv_wm_ddl_values {
-       uint8_t plane[I915_MAX_PLANES];
+       u8 plane[I915_MAX_PLANES];
 };
 
 struct vlv_wm_values {
        struct g4x_pipe_wm pipe[3];
        struct g4x_sr_wm sr;
        struct vlv_wm_ddl_values ddl[3];
-       uint8_t level;
+       u8 level;
        bool cxsr;
 };
 
@@ -1080,10 +1099,10 @@ struct g4x_wm_values {
 };
 
 struct skl_ddb_entry {
-       uint16_t start, end;    /* in number of blocks, 'end' is exclusive */
+       u16 start, end; /* in number of blocks, 'end' is exclusive */
 };
 
-static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry)
+static inline u16 skl_ddb_entry_size(const struct skl_ddb_entry *entry)
 {
        return entry->end - entry->start;
 }
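
Since end is exclusive, an entry with .start = 0 and .end = 8 covers blocks 0 through 7 and skl_ddb_entry_size() returns 8.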
@@ -1107,8 +1126,9 @@ struct skl_ddb_values {
 };
 
 struct skl_wm_level {
-       uint16_t plane_res_b;
-       uint8_t plane_res_l;
+       u16 min_ddb_alloc;
+       u16 plane_res_b;
+       u8 plane_res_l;
        bool plane_en;
 };
 
@@ -1117,15 +1137,15 @@ struct skl_wm_params {
        bool x_tiled, y_tiled;
        bool rc_surface;
        bool is_planar;
-       uint32_t width;
-       uint8_t cpp;
-       uint32_t plane_pixel_rate;
-       uint32_t y_min_scanlines;
-       uint32_t plane_bytes_per_line;
+       u32 width;
+       u8 cpp;
+       u32 plane_pixel_rate;
+       u32 y_min_scanlines;
+       u32 plane_bytes_per_line;
        uint_fixed_16_16_t plane_blocks_per_line;
        uint_fixed_16_16_t y_tile_minimum;
-       uint32_t linetime_us;
-       uint32_t dbuf_block_size;
+       u32 linetime_us;
+       u32 dbuf_block_size;
 };
 
 /*
@@ -1155,6 +1175,25 @@ struct i915_runtime_pm {
        atomic_t wakeref_count;
        bool suspended;
        bool irqs_enabled;
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+       /*
+        * To aid detection of wakeref leaks and general misuse, we
+        * track all wakeref holders. With manual markup (i.e. returning
+        * a cookie to each rpm_get caller which they then supply to their
+        * paired rpm_put) we can remove the corresponding pairs and keep
+        * the array trimmed to active wakerefs.
+        */
+       struct intel_runtime_pm_debug {
+               spinlock_t lock;
+
+               depot_stack_handle_t last_acquire;
+               depot_stack_handle_t last_release;
+
+               depot_stack_handle_t *owners;
+               unsigned long count;
+       } debug;
+#endif
 };
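
The debug side stores one stack-depot handle per outstanding wakeref. A sketch of how such a handle is minted, assuming the <linux/stackdepot.h> / <linux/stacktrace.h> API of this kernel generation (the helper name and stack depth are illustrative):

    static depot_stack_handle_t save_wakeref_stack(void)
    {
            unsigned long entries[32];
            struct stack_trace trace = {
                    .entries = entries,
                    .max_entries = ARRAY_SIZE(entries),
                    .skip = 1,      /* drop this helper's own frame */
            };

            save_stack_trace(&trace);
            return depot_save_stack(&trace, GFP_NOWAIT | __GFP_NOWARN);
    }

Handles compare equal for identical call chains, which is what keeps the owners array cheap to maintain and easy to trim when a matching rpm_put arrives.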
 
 enum intel_pipe_crc_source {
@@ -1310,6 +1349,12 @@ struct i915_perf_stream {
         */
        struct list_head link;
 
+       /**
+        * @wakeref: As we keep the device awake while the perf stream is
+        * active, we track our runtime pm reference for later release.
+        */
+       intel_wakeref_t wakeref;
+
        /**
         * @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*`
         * properties given when opening a stream, representing the contents
@@ -1430,7 +1475,8 @@ struct drm_i915_private {
        struct kmem_cache *dependencies;
        struct kmem_cache *priorities;
 
-       const struct intel_device_info info;
+       const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
+       struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
        struct intel_driver_caps caps;
 
        /**
@@ -1482,14 +1528,14 @@ struct drm_i915_private {
         * Base address of where the gmbus and gpio blocks are located (either
         * on PCH or on SoC for platforms without PCH).
         */
-       uint32_t gpio_mmio_base;
+       u32 gpio_mmio_base;
 
        /* MMIO base address for MIPI regs */
-       uint32_t mipi_mmio_base;
+       u32 mipi_mmio_base;
 
-       uint32_t psr_mmio_base;
+       u32 psr_mmio_base;
 
-       uint32_t pps_mmio_base;
+       u32 pps_mmio_base;
 
        wait_queue_head_t gmbus_wait_queue;
 
@@ -1744,17 +1790,17 @@ struct drm_i915_private {
                 * in 0.5us units for WM1+.
                 */
                /* primary */
-               uint16_t pri_latency[5];
+               u16 pri_latency[5];
                /* sprite */
-               uint16_t spr_latency[5];
+               u16 spr_latency[5];
                /* cursor */
-               uint16_t cur_latency[5];
+               u16 cur_latency[5];
                /*
                 * Raw watermark memory latency values
                 * for SKL for all 8 levels
                 * in 1us units.
                 */
-               uint16_t skl_latency[8];
+               u16 skl_latency[8];
 
                /* current hardware state */
                union {
@@ -1764,7 +1810,7 @@ struct drm_i915_private {
                        struct g4x_wm_values g4x;
                };
 
-               uint8_t max_level;
+               u8 max_level;
 
                /*
                 * Should be held around atomic WM register writing; also
@@ -1942,12 +1988,18 @@ struct drm_i915_private {
                void (*resume)(struct drm_i915_private *);
                void (*cleanup_engine)(struct intel_engine_cs *engine);
 
-               struct list_head timelines;
+               struct i915_gt_timelines {
+                       struct mutex mutex; /* protects list, tainted by GPU */
+                       struct list_head active_list;
+
+                       /* Pack multiple timelines' seqnos into the same page */
+                       spinlock_t hwsp_lock;
+                       struct list_head hwsp_free_list;
+               } timelines;
 
                struct list_head active_rings;
                struct list_head closed_vma;
                u32 active_requests;
-               u32 request_serial;
 
                /**
                 * Is the GPU currently considered idle, or busy executing
@@ -1956,7 +2008,7 @@ struct drm_i915_private {
                 * In order to reduce the effect on performance, there
                 * is a slight delay before we do so.
                 */
-               bool awake;
+               intel_wakeref_t awake;
 
                /**
                 * The number of times we have woken up.
@@ -2191,17 +2243,12 @@ static inline unsigned int i915_sg_segment_size(void)
        return size;
 }
 
-static inline const struct intel_device_info *
-intel_info(const struct drm_i915_private *dev_priv)
-{
-       return &dev_priv->info;
-}
-
-#define INTEL_INFO(dev_priv)   intel_info((dev_priv))
+#define INTEL_INFO(dev_priv)   (&(dev_priv)->__info)
+#define RUNTIME_INFO(dev_priv) (&(dev_priv)->__runtime)
 #define DRIVER_CAPS(dev_priv)  (&(dev_priv)->caps)
 
-#define INTEL_GEN(dev_priv)    ((dev_priv)->info.gen)
-#define INTEL_DEVID(dev_priv)  ((dev_priv)->info.device_id)
+#define INTEL_GEN(dev_priv)    (INTEL_INFO(dev_priv)->gen)
+#define INTEL_DEVID(dev_priv)  (RUNTIME_INFO(dev_priv)->device_id)
 
 #define REVID_FOREVER          0xff
 #define INTEL_REVID(dev_priv)  ((dev_priv)->drm.pdev->revision)
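
This is the heart of the INTEL_INFO()/RUNTIME_INFO() split that the earlier hunks convert to: __info is the const, write-once template copied from the PCI match table, while __runtime collects everything probed or fused at load time (device_id, the sseu masks, the CS timestamp frequency). The probe-time escape hatch used above, mkwrite_device_info(), is plausibly just a const-cast; its body is not in this diff, so treat this as an assumption:

    static struct intel_device_info *
    mkwrite_device_info(struct drm_i915_private *i915)
    {
            /* only legal while the device info is still being constructed */
            return (struct intel_device_info *)INTEL_INFO(i915);
    }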
@@ -2212,8 +2259,12 @@ intel_info(const struct drm_i915_private *dev_priv)
        GENMASK((e) - 1, (s) - 1))
 
 /* Returns true if Gen is in inclusive range [Start, End] */
-#define IS_GEN(dev_priv, s, e) \
-       (!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e))))
+#define IS_GEN_RANGE(dev_priv, s, e) \
+       (!!(INTEL_INFO(dev_priv)->gen_mask & INTEL_GEN_MASK((s), (e))))
+
+#define IS_GEN(dev_priv, n) \
+       (BUILD_BUG_ON_ZERO(!__builtin_constant_p(n)) + \
+        INTEL_INFO(dev_priv)->gen == (n))
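
The BUILD_BUG_ON_ZERO(!__builtin_constant_p(n)) term makes IS_GEN(dev_priv, n) reject a runtime-variable n at compile time; non-constant generations go through IS_GEN_RANGE() or INTEL_GEN() comparisons instead. BUILD_BUG_ON_ZERO() is the usual kernel trick, roughly:

    /* evaluates to 0 when e is false; a negative bitfield width (and
     * therefore a build error) when e is true */
    #define BUILD_BUG_ON_ZERO(e) ((int)(sizeof(struct { int:(-!!(e)); })))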
 
 /*
  * Return true if revision is in range [since,until] inclusive.
@@ -2223,7 +2274,7 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_REVID(p, since, until) \
        (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
 
-#define IS_PLATFORM(dev_priv, p) ((dev_priv)->info.platform_mask & BIT(p))
+#define IS_PLATFORM(dev_priv, p) (INTEL_INFO(dev_priv)->platform_mask & BIT(p))
 
 #define IS_I830(dev_priv)      IS_PLATFORM(dev_priv, INTEL_I830)
 #define IS_I845G(dev_priv)     IS_PLATFORM(dev_priv, INTEL_I845G)
@@ -2245,7 +2296,7 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_IRONLAKE_M(dev_priv)        (INTEL_DEVID(dev_priv) == 0x0046)
 #define IS_IVYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
 #define IS_IVB_GT1(dev_priv)   (IS_IVYBRIDGE(dev_priv) && \
-                                (dev_priv)->info.gt == 1)
+                                INTEL_INFO(dev_priv)->gt == 1)
 #define IS_VALLEYVIEW(dev_priv)        IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
 #define IS_CHERRYVIEW(dev_priv)        IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
 #define IS_HASWELL(dev_priv)   IS_PLATFORM(dev_priv, INTEL_HASWELL)
@@ -2257,7 +2308,7 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_COFFEELAKE(dev_priv)        IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
 #define IS_CANNONLAKE(dev_priv)        IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
 #define IS_ICELAKE(dev_priv)   IS_PLATFORM(dev_priv, INTEL_ICELAKE)
-#define IS_MOBILE(dev_priv)    ((dev_priv)->info.is_mobile)
+#define IS_MOBILE(dev_priv)    (INTEL_INFO(dev_priv)->is_mobile)
 #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
                                    (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
 #define IS_BDW_ULT(dev_priv)   (IS_BROADWELL(dev_priv) && \
@@ -2268,11 +2319,13 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_BDW_ULX(dev_priv)   (IS_BROADWELL(dev_priv) && \
                                 (INTEL_DEVID(dev_priv) & 0xf) == 0xe)
 #define IS_BDW_GT3(dev_priv)   (IS_BROADWELL(dev_priv) && \
-                                (dev_priv)->info.gt == 3)
+                                INTEL_INFO(dev_priv)->gt == 3)
 #define IS_HSW_ULT(dev_priv)   (IS_HASWELL(dev_priv) && \
                                 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00)
 #define IS_HSW_GT3(dev_priv)   (IS_HASWELL(dev_priv) && \
-                                (dev_priv)->info.gt == 3)
+                                INTEL_INFO(dev_priv)->gt == 3)
+#define IS_HSW_GT1(dev_priv)   (IS_HASWELL(dev_priv) && \
+                                INTEL_INFO(dev_priv)->gt == 1)
 /* ULX machines are also considered ULT. */
 #define IS_HSW_ULX(dev_priv)   (INTEL_DEVID(dev_priv) == 0x0A0E || \
                                 INTEL_DEVID(dev_priv) == 0x0A1E)
@@ -2295,23 +2348,25 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_AML_ULX(dev_priv)   (INTEL_DEVID(dev_priv) == 0x591C || \
                                 INTEL_DEVID(dev_priv) == 0x87C0)
 #define IS_SKL_GT2(dev_priv)   (IS_SKYLAKE(dev_priv) && \
-                                (dev_priv)->info.gt == 2)
+                                INTEL_INFO(dev_priv)->gt == 2)
 #define IS_SKL_GT3(dev_priv)   (IS_SKYLAKE(dev_priv) && \
-                                (dev_priv)->info.gt == 3)
+                                INTEL_INFO(dev_priv)->gt == 3)
 #define IS_SKL_GT4(dev_priv)   (IS_SKYLAKE(dev_priv) && \
-                                (dev_priv)->info.gt == 4)
+                                INTEL_INFO(dev_priv)->gt == 4)
 #define IS_KBL_GT2(dev_priv)   (IS_KABYLAKE(dev_priv) && \
-                                (dev_priv)->info.gt == 2)
+                                INTEL_INFO(dev_priv)->gt == 2)
 #define IS_KBL_GT3(dev_priv)   (IS_KABYLAKE(dev_priv) && \
-                                (dev_priv)->info.gt == 3)
+                                INTEL_INFO(dev_priv)->gt == 3)
 #define IS_CFL_ULT(dev_priv)   (IS_COFFEELAKE(dev_priv) && \
                                 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0)
 #define IS_CFL_GT2(dev_priv)   (IS_COFFEELAKE(dev_priv) && \
-                                (dev_priv)->info.gt == 2)
+                                INTEL_INFO(dev_priv)->gt == 2)
 #define IS_CFL_GT3(dev_priv)   (IS_COFFEELAKE(dev_priv) && \
-                                (dev_priv)->info.gt == 3)
+                                INTEL_INFO(dev_priv)->gt == 3)
 #define IS_CNL_WITH_PORT_F(dev_priv)   (IS_CANNONLAKE(dev_priv) && \
                                        (INTEL_DEVID(dev_priv) & 0x0004) == 0x0004)
+#define IS_ICL_WITH_PORT_F(dev_priv)   (IS_ICELAKE(dev_priv) && \
+                                       INTEL_DEVID(dev_priv) != 0x8A51)
 
 #define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)
 
@@ -2366,26 +2421,9 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_ICL_REVID(p, since, until) \
        (IS_ICELAKE(p) && IS_REVID(p, since, until))
 
-/*
- * The genX designation typically refers to the render engine, so render
- * capability related checks should use IS_GEN, while display and other checks
- * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
- * chips, etc.).
- */
-#define IS_GEN2(dev_priv)      (!!((dev_priv)->info.gen_mask & BIT(1)))
-#define IS_GEN3(dev_priv)      (!!((dev_priv)->info.gen_mask & BIT(2)))
-#define IS_GEN4(dev_priv)      (!!((dev_priv)->info.gen_mask & BIT(3)))
-#define IS_GEN5(dev_priv)      (!!((dev_priv)->info.gen_mask & BIT(4)))
-#define IS_GEN6(dev_priv)      (!!((dev_priv)->info.gen_mask & BIT(5)))
-#define IS_GEN7(dev_priv)      (!!((dev_priv)->info.gen_mask & BIT(6)))
-#define IS_GEN8(dev_priv)      (!!((dev_priv)->info.gen_mask & BIT(7)))
-#define IS_GEN9(dev_priv)      (!!((dev_priv)->info.gen_mask & BIT(8)))
-#define IS_GEN10(dev_priv)     (!!((dev_priv)->info.gen_mask & BIT(9)))
-#define IS_GEN11(dev_priv)     (!!((dev_priv)->info.gen_mask & BIT(10)))
-
 #define IS_LP(dev_priv)        (INTEL_INFO(dev_priv)->is_lp)
-#define IS_GEN9_LP(dev_priv)   (IS_GEN9(dev_priv) && IS_LP(dev_priv))
-#define IS_GEN9_BC(dev_priv)   (IS_GEN9(dev_priv) && !IS_LP(dev_priv))
+#define IS_GEN9_LP(dev_priv)   (IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
+#define IS_GEN9_BC(dev_priv)   (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
 
 #define ENGINE_MASK(id)        BIT(id)
 #define RENDER_RING    ENGINE_MASK(RCS)
@@ -2399,29 +2437,27 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define ALL_ENGINES    (~0)
 
 #define HAS_ENGINE(dev_priv, id) \
-       (!!((dev_priv)->info.ring_mask & ENGINE_MASK(id)))
+       (!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id)))
 
 #define HAS_BSD(dev_priv)      HAS_ENGINE(dev_priv, VCS)
 #define HAS_BSD2(dev_priv)     HAS_ENGINE(dev_priv, VCS2)
 #define HAS_BLT(dev_priv)      HAS_ENGINE(dev_priv, BCS)
 #define HAS_VEBOX(dev_priv)    HAS_ENGINE(dev_priv, VECS)
 
-#define HAS_LEGACY_SEMAPHORES(dev_priv) IS_GEN7(dev_priv)
-
-#define HAS_LLC(dev_priv)      ((dev_priv)->info.has_llc)
-#define HAS_SNOOP(dev_priv)    ((dev_priv)->info.has_snoop)
+#define HAS_LLC(dev_priv)      (INTEL_INFO(dev_priv)->has_llc)
+#define HAS_SNOOP(dev_priv)    (INTEL_INFO(dev_priv)->has_snoop)
 #define HAS_EDRAM(dev_priv)    (!!((dev_priv)->edram_cap & EDRAM_ENABLED))
 #define HAS_WT(dev_priv)       ((IS_HASWELL(dev_priv) || \
                                 IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
 
-#define HWS_NEEDS_PHYSICAL(dev_priv)   ((dev_priv)->info.hws_needs_physical)
+#define HWS_NEEDS_PHYSICAL(dev_priv)   (INTEL_INFO(dev_priv)->hws_needs_physical)
 
 #define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
-               ((dev_priv)->info.has_logical_ring_contexts)
+               (INTEL_INFO(dev_priv)->has_logical_ring_contexts)
 #define HAS_LOGICAL_RING_ELSQ(dev_priv) \
-               ((dev_priv)->info.has_logical_ring_elsq)
+               (INTEL_INFO(dev_priv)->has_logical_ring_elsq)
 #define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
-               ((dev_priv)->info.has_logical_ring_preemption)
+               (INTEL_INFO(dev_priv)->has_logical_ring_preemption)
 
 #define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
 
@@ -2435,12 +2471,12 @@ intel_info(const struct drm_i915_private *dev_priv)
 
 #define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
        GEM_BUG_ON((sizes) == 0); \
-       ((sizes) & ~(dev_priv)->info.page_sizes) == 0; \
+       ((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \
 })
 
-#define HAS_OVERLAY(dev_priv)           ((dev_priv)->info.display.has_overlay)
+#define HAS_OVERLAY(dev_priv)           (INTEL_INFO(dev_priv)->display.has_overlay)
 #define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
-               ((dev_priv)->info.display.overlay_needs_physical)
+               (INTEL_INFO(dev_priv)->display.overlay_needs_physical)
 
 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
 #define HAS_BROKEN_CS_TLB(dev_priv)    (IS_I830(dev_priv) || IS_I845G(dev_priv))
@@ -2458,42 +2494,42 @@ intel_info(const struct drm_i915_private *dev_priv)
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
  * rows, which changed the alignment requirements and fence programming.
  */
-#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \
+#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN(dev_priv, 2) && \
                                         !(IS_I915G(dev_priv) || \
                                         IS_I915GM(dev_priv)))
-#define SUPPORTS_TV(dev_priv)          ((dev_priv)->info.display.supports_tv)
-#define I915_HAS_HOTPLUG(dev_priv)     ((dev_priv)->info.display.has_hotplug)
+#define SUPPORTS_TV(dev_priv)          (INTEL_INFO(dev_priv)->display.supports_tv)
+#define I915_HAS_HOTPLUG(dev_priv)     (INTEL_INFO(dev_priv)->display.has_hotplug)
 
 #define HAS_FW_BLC(dev_priv)   (INTEL_GEN(dev_priv) > 2)
-#define HAS_FBC(dev_priv)      ((dev_priv)->info.display.has_fbc)
-#define HAS_CUR_FBC(dev_priv)  (!HAS_GMCH_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 7)
+#define HAS_FBC(dev_priv)      (INTEL_INFO(dev_priv)->display.has_fbc)
+#define HAS_CUR_FBC(dev_priv)  (!HAS_GMCH(dev_priv) && INTEL_GEN(dev_priv) >= 7)
 
 #define HAS_IPS(dev_priv)      (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
 
-#define HAS_DP_MST(dev_priv)   ((dev_priv)->info.display.has_dp_mst)
+#define HAS_DP_MST(dev_priv)   (INTEL_INFO(dev_priv)->display.has_dp_mst)
 
-#define HAS_DDI(dev_priv)               ((dev_priv)->info.display.has_ddi)
-#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg)
-#define HAS_PSR(dev_priv)               ((dev_priv)->info.display.has_psr)
+#define HAS_DDI(dev_priv)               (INTEL_INFO(dev_priv)->display.has_ddi)
+#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg)
+#define HAS_PSR(dev_priv)               (INTEL_INFO(dev_priv)->display.has_psr)
 
-#define HAS_RC6(dev_priv)               ((dev_priv)->info.has_rc6)
-#define HAS_RC6p(dev_priv)              ((dev_priv)->info.has_rc6p)
+#define HAS_RC6(dev_priv)               (INTEL_INFO(dev_priv)->has_rc6)
+#define HAS_RC6p(dev_priv)              (INTEL_INFO(dev_priv)->has_rc6p)
 #define HAS_RC6pp(dev_priv)             (false) /* HW was never validated */
 
-#define HAS_CSR(dev_priv)      ((dev_priv)->info.display.has_csr)
+#define HAS_CSR(dev_priv)      (INTEL_INFO(dev_priv)->display.has_csr)
 
-#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
-#define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)
+#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
+#define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)
 
-#define HAS_IPC(dev_priv)               ((dev_priv)->info.display.has_ipc)
+#define HAS_IPC(dev_priv)               (INTEL_INFO(dev_priv)->display.has_ipc)
 
 /*
  * For now, anything with a GuC requires uCode loading, and then supports
  * command submission once loaded. But these are logically independent
  * properties, so we have separate macros to test them.
  */
-#define HAS_GUC(dev_priv)      ((dev_priv)->info.has_guc)
-#define HAS_GUC_CT(dev_priv)   ((dev_priv)->info.has_guc_ct)
+#define HAS_GUC(dev_priv)      (INTEL_INFO(dev_priv)->has_guc)
+#define HAS_GUC_CT(dev_priv)   (INTEL_INFO(dev_priv)->has_guc_ct)
 #define HAS_GUC_UCODE(dev_priv)        (HAS_GUC(dev_priv))
 #define HAS_GUC_SCHED(dev_priv)        (HAS_GUC(dev_priv))
 
@@ -2502,11 +2538,11 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define HAS_HUC_UCODE(dev_priv)        (HAS_GUC(dev_priv))
 
 /* Having a GuC is not the same as using a GuC */
-#define USES_GUC(dev_priv)             intel_uc_is_using_guc()
-#define USES_GUC_SUBMISSION(dev_priv)  intel_uc_is_using_guc_submission()
-#define USES_HUC(dev_priv)             intel_uc_is_using_huc()
+#define USES_GUC(dev_priv)             intel_uc_is_using_guc(dev_priv)
+#define USES_GUC_SUBMISSION(dev_priv)  intel_uc_is_using_guc_submission(dev_priv)
+#define USES_HUC(dev_priv)             intel_uc_is_using_huc(dev_priv)
 
-#define HAS_POOLED_EU(dev_priv)        ((dev_priv)->info.has_pooled_eu)
+#define HAS_POOLED_EU(dev_priv)        (INTEL_INFO(dev_priv)->has_pooled_eu)
 
 #define INTEL_PCH_DEVICE_ID_MASK               0xff80
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE           0x3b00
@@ -2546,12 +2582,12 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
 #define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
 
-#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.display.has_gmch_display)
+#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)
 
 #define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)
 
 /* DPF == dynamic parity feature */
-#define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf)
+#define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
 #define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
                                 2 : HAS_L3_DPF(dev_priv))
 
@@ -2601,19 +2637,7 @@ extern const struct dev_pm_ops i915_pm_ops;
 extern int i915_driver_load(struct pci_dev *pdev,
                            const struct pci_device_id *ent);
 extern void i915_driver_unload(struct drm_device *dev);
-extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
-extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
-
-extern void i915_reset(struct drm_i915_private *i915,
-                      unsigned int stalled_mask,
-                      const char *reason);
-extern int i915_reset_engine(struct intel_engine_cs *engine,
-                            const char *reason);
-
-extern bool intel_has_reset_engine(struct drm_i915_private *dev_priv);
-extern int intel_reset_guc(struct drm_i915_private *dev_priv);
-extern int intel_guc_reset_engine(struct intel_guc *guc,
-                                 struct intel_engine_cs *engine);
+
 extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
 extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
@@ -2656,20 +2680,11 @@ static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
                           &dev_priv->gpu_error.hangcheck_work, delay);
 }
 
-__printf(4, 5)
-void i915_handle_error(struct drm_i915_private *dev_priv,
-                      u32 engine_mask,
-                      unsigned long flags,
-                      const char *fmt, ...);
-#define I915_ERROR_CAPTURE BIT(0)
-
 extern void intel_irq_init(struct drm_i915_private *dev_priv);
 extern void intel_irq_fini(struct drm_i915_private *dev_priv);
 int intel_irq_install(struct drm_i915_private *dev_priv);
 void intel_irq_uninstall(struct drm_i915_private *dev_priv);
 
-void i915_clear_error_registers(struct drm_i915_private *dev_priv);
-
 static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
 {
        return dev_priv->gvt;
@@ -2693,45 +2708,45 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
-                                  uint32_t mask,
-                                  uint32_t bits);
+                                  u32 mask,
+                                  u32 bits);
 void ilk_update_display_irq(struct drm_i915_private *dev_priv,
-                           uint32_t interrupt_mask,
-                           uint32_t enabled_irq_mask);
+                           u32 interrupt_mask,
+                           u32 enabled_irq_mask);
 static inline void
-ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
+ilk_enable_display_irq(struct drm_i915_private *dev_priv, u32 bits)
 {
        ilk_update_display_irq(dev_priv, bits, bits);
 }
 static inline void
-ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
+ilk_disable_display_irq(struct drm_i915_private *dev_priv, u32 bits)
 {
        ilk_update_display_irq(dev_priv, bits, 0);
 }
 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
                         enum pipe pipe,
-                        uint32_t interrupt_mask,
-                        uint32_t enabled_irq_mask);
+                        u32 interrupt_mask,
+                        u32 enabled_irq_mask);
 static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv,
-                                      enum pipe pipe, uint32_t bits)
+                                      enum pipe pipe, u32 bits)
 {
        bdw_update_pipe_irq(dev_priv, pipe, bits, bits);
 }
 static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv,
-                                       enum pipe pipe, uint32_t bits)
+                                       enum pipe pipe, u32 bits)
 {
        bdw_update_pipe_irq(dev_priv, pipe, bits, 0);
 }
 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
-                                 uint32_t interrupt_mask,
-                                 uint32_t enabled_irq_mask);
+                                 u32 interrupt_mask,
+                                 u32 enabled_irq_mask);
 static inline void
-ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
+ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits)
 {
        ibx_display_interrupt_update(dev_priv, bits, bits);
 }
 static inline void
-ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
+ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits)
 {
        ibx_display_interrupt_update(dev_priv, bits, 0);
 }
@@ -2916,13 +2931,13 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
        __i915_gem_object_unpin_pages(obj);
 }
 
-enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */
+enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
        I915_MM_NORMAL = 0,
-       I915_MM_SHRINKER
+       I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque paths */

 };
 
-void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
-                                enum i915_mm_subclass subclass);
+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+                               enum i915_mm_subclass subclass);
 void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
 
 enum i915_map_type {
@@ -2991,7 +3006,7 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
                         struct drm_device *dev,
                         struct drm_mode_create_dumb *args);
 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
-                     uint32_t handle, uint64_t *offset);
+                     u32 handle, u64 *offset);
 int i915_gem_mmap_gtt_version(void);
 
 void i915_gem_track_fb(struct drm_i915_gem_object *old,
@@ -3008,11 +3023,6 @@ static inline bool i915_reset_backoff(struct i915_gpu_error *error)
        return unlikely(test_bit(I915_RESET_BACKOFF, &error->flags));
 }
 
-static inline bool i915_reset_handoff(struct i915_gpu_error *error)
-{
-       return unlikely(test_bit(I915_RESET_HANDOFF, &error->flags));
-}
-
 static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
 {
        return unlikely(test_bit(I915_WEDGED, &error->flags));
@@ -3034,18 +3044,8 @@ static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
        return READ_ONCE(error->reset_engine_count[engine->id]);
 }
 
-struct i915_request *
-i915_gem_reset_prepare_engine(struct intel_engine_cs *engine);
-int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
-void i915_gem_reset(struct drm_i915_private *dev_priv,
-                   unsigned int stalled_mask);
-void i915_gem_reset_finish_engine(struct intel_engine_cs *engine);
-void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
 void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
 bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
-void i915_gem_reset_engine(struct intel_engine_cs *engine,
-                          struct i915_request *request,
-                          bool stalled);
 
 void i915_gem_init_mmio(struct drm_i915_private *i915);
 int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
@@ -3142,7 +3142,7 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file);
 void i915_oa_init_reg_state(struct intel_engine_cs *engine,
                            struct i915_gem_context *ctx,
-                           uint32_t *reg_state);
+                           u32 *reg_state);
 
 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct i915_address_space *vm,
@@ -3204,7 +3204,8 @@ unsigned long i915_gem_shrink(struct drm_i915_private *i915,
 unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
 void i915_gem_shrinker_register(struct drm_i915_private *i915);
 void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
-void i915_gem_shrinker_taints_mutex(struct mutex *mutex);
+void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
+                                   struct mutex *mutex);
 
 /* i915_gem_tiling.c */
 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
@@ -3313,7 +3314,21 @@ static inline void intel_unregister_dsm_handler(void) { return; }
 static inline struct intel_device_info *
 mkwrite_device_info(struct drm_i915_private *dev_priv)
 {
-       return (struct intel_device_info *)&dev_priv->info;
+       return (struct intel_device_info *)INTEL_INFO(dev_priv);
+}
+
+static inline struct intel_sseu
+intel_device_default_sseu(struct drm_i915_private *i915)
+{
+       const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
+       struct intel_sseu value = {
+               .slice_mask = sseu->slice_mask,
+               .subslice_mask = sseu->subslice_mask[0],
+               .min_eus_per_subslice = sseu->max_eus_per_subslice,
+               .max_eus_per_subslice = sseu->max_eus_per_subslice,
+       };
+
+       return value;
 }
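The new intel_device_default_sseu() helper snapshots the fully-enabled slice/subslice/EU topology from RUNTIME_INFO(): it takes slice 0's subslice mask and pins min_eus_per_subslice to the maximum, i.e. "everything on". A hypothetical caller that starts from the default before applying a user restriction:

/* Hypothetical helper: the restriction step is illustrative only. */
static struct intel_sseu
example_user_sseu(struct drm_i915_private *i915, bool single_slice)
{
	struct intel_sseu sseu = intel_device_default_sseu(i915);

	if (single_slice)
		sseu.slice_mask &= BIT(0);

	return sseu;
}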
 
 /* modesetting */
@@ -3393,10 +3408,10 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
                            enum dpio_phy phy);
 bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
                              enum dpio_phy phy);
-uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(uint8_t lane_count);
+u8 bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count);
 void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
-                                    uint8_t lane_lat_optim_mask);
-uint8_t bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
+                                    u8 lane_lat_optim_mask);
+u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
 
 void chv_set_phy_signal_level(struct intel_encoder *encoder,
                              u32 deemph_reg_value, u32 margin_reg_value,
@@ -3599,90 +3614,6 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
        }
 }
 
-static inline bool
-__i915_request_irq_complete(const struct i915_request *rq)
-{
-       struct intel_engine_cs *engine = rq->engine;
-       u32 seqno;
-
-       /* Note that the engine may have wrapped around the seqno, and
-        * so our request->global_seqno will be ahead of the hardware,
-        * even though it completed the request before wrapping. We catch
-        * this by kicking all the waiters before resetting the seqno
-        * in hardware, and also signal the fence.
-        */
-       if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
-               return true;
-
-       /* The request was dequeued before we were awoken. We check after
-        * inspecting the hw to confirm that this was the same request
-        * that generated the HWS update. The memory barriers within
-        * the request execution are sufficient to ensure that a check
-        * after reading the value from hw matches this request.
-        */
-       seqno = i915_request_global_seqno(rq);
-       if (!seqno)
-               return false;
-
-       /* Before we do the heavier coherent read of the seqno,
-        * check the value (hopefully) in the CPU cacheline.
-        */
-       if (__i915_request_completed(rq, seqno))
-               return true;
-
-       /* Ensure our read of the seqno is coherent so that we
-        * do not "miss an interrupt" (i.e. if this is the last
-        * request and the seqno write from the GPU is not visible
-        * by the time the interrupt fires, we will see that the
-        * request is incomplete and go back to sleep awaiting
-        * another interrupt that will never come.)
-        *
-        * Strictly, we only need to do this once after an interrupt,
-        * but it is easier and safer to do it every time the waiter
-        * is woken.
-        */
-       if (engine->irq_seqno_barrier &&
-           test_and_clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted)) {
-               struct intel_breadcrumbs *b = &engine->breadcrumbs;
-
-               /* The ordering of irq_posted versus applying the barrier
-                * is crucial. The clearing of the current irq_posted must
-                * be visible before we perform the barrier operation,
-                * such that if a subsequent interrupt arrives, irq_posted
-                * is reasserted and our task rewoken (which causes us to
-                * do another __i915_request_irq_complete() immediately
-                * and reapply the barrier). Conversely, if the clear
-                * occurs after the barrier, then an interrupt that arrived
-                * whilst we waited on the barrier would not trigger a
-                * barrier on the next pass, and the read may not see the
-                * seqno update.
-                */
-               engine->irq_seqno_barrier(engine);
-
-               /* If we consume the irq, but we are no longer the bottom-half,
-                * the real bottom-half may not have serialised their own
-                * seqno check with the irq-barrier (i.e. may have inspected
-                * the seqno before we believe it coherent since they see
-                * irq_posted == false but we are still running).
-                */
-               spin_lock_irq(&b->irq_lock);
-               if (b->irq_wait && b->irq_wait->tsk != current)
-                       /* Note that if the bottom-half is changed as we
-                        * are sending the wake-up, the new bottom-half will
-                        * be woken by whoever made the change. We only have
-                        * to worry about when we steal the irq-posted for
-                        * ourself.
-                        */
-                       wake_up_process(b->irq_wait->tsk);
-               spin_unlock_irq(&b->irq_lock);
-
-               if (__i915_request_completed(rq, seqno))
-                       return true;
-       }
-
-       return false;
-}
-
 void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
 bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
 
index 7399ac7a562924890b47c5eac4d52371bbcd8e2c..6728ea5c71d4c2916a37daa20ec767252a96d0ff 100644 (file)
  *
  */
 
-#include <drm/drmP.h>
 #include <drm/drm_vma_manager.h>
+#include <drm/drm_pci.h>
 #include <drm/i915_drm.h>
-#include "i915_drv.h"
-#include "i915_gem_clflush.h"
-#include "i915_vgpu.h"
-#include "i915_trace.h"
-#include "intel_drv.h"
-#include "intel_frontbuffer.h"
-#include "intel_mocs.h"
-#include "intel_workarounds.h"
-#include "i915_gemfs.h"
 #include <linux/dma-fence-array.h>
 #include <linux/kthread.h>
 #include <linux/reservation.h>
 #include <linux/swap.h>
 #include <linux/pci.h>
 #include <linux/dma-buf.h>
+#include <linux/mman.h>
+
+#include "i915_drv.h"
+#include "i915_gem_clflush.h"
+#include "i915_gemfs.h"
+#include "i915_reset.h"
+#include "i915_trace.h"
+#include "i915_vgpu.h"
+
+#include "intel_drv.h"
+#include "intel_frontbuffer.h"
+#include "intel_mocs.h"
+#include "intel_workarounds.h"
 
 static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
 
@@ -139,6 +143,8 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
 
 static u32 __i915_gem_park(struct drm_i915_private *i915)
 {
+       intel_wakeref_t wakeref;
+
        GEM_TRACE("\n");
 
        lockdep_assert_held(&i915->drm.struct_mutex);
@@ -169,14 +175,13 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
        i915_pmu_gt_parked(i915);
        i915_vma_parked(i915);
 
-       i915->gt.awake = false;
+       wakeref = fetch_and_zero(&i915->gt.awake);
+       GEM_BUG_ON(!wakeref);
 
        if (INTEL_GEN(i915) >= 6)
                gen6_rps_idle(i915);
 
-       intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ);
-
-       intel_runtime_pm_put(i915);
+       intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
 
        return i915->gt.epoch;
 }
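__i915_gem_park() now claims the stored wakeref with fetch_and_zero() instead of a plain assignment, so exactly one path consumes it and GEM_BUG_ON(!wakeref) catches a double-park. Assumed shape of the helper (it matches the driver's usual read-then-zero idiom, but treat this as a sketch):

/* Read the old value, zero the location, and return the old value. */
#define fetch_and_zero(ptr) ({					\
	typeof(*(ptr)) __val = *(ptr);				\
	*(ptr) = (typeof(*(ptr)))0;				\
	__val;							\
})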
@@ -201,12 +206,11 @@ void i915_gem_unpark(struct drm_i915_private *i915)
 
        lockdep_assert_held(&i915->drm.struct_mutex);
        GEM_BUG_ON(!i915->gt.active_requests);
+       assert_rpm_wakelock_held(i915);
 
        if (i915->gt.awake)
                return;
 
-       intel_runtime_pm_get_noresume(i915);
-
        /*
         * It seems that the DMC likes to transition between the DC states a lot
         * when there are no connected displays (no active power domains) during
@@ -218,9 +222,9 @@ void i915_gem_unpark(struct drm_i915_private *i915)
         * Work around it by grabbing a GT IRQ power domain whilst there is any
         * GT activity, preventing any DC state transitions.
         */
-       intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
+       i915->gt.awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
+       GEM_BUG_ON(!i915->gt.awake);
 
-       i915->gt.awake = true;
        if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */
                i915->gt.epoch = 1;
 
@@ -243,21 +247,19 @@ int
 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
+       struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
        struct drm_i915_gem_get_aperture *args = data;
        struct i915_vma *vma;
        u64 pinned;
 
+       mutex_lock(&ggtt->vm.mutex);
+
        pinned = ggtt->vm.reserved;
-       mutex_lock(&dev->struct_mutex);
-       list_for_each_entry(vma, &ggtt->vm.active_list, vm_link)
-               if (i915_vma_is_pinned(vma))
-                       pinned += vma->node.size;
-       list_for_each_entry(vma, &ggtt->vm.inactive_list, vm_link)
+       list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
                if (i915_vma_is_pinned(vma))
                        pinned += vma->node.size;
-       mutex_unlock(&dev->struct_mutex);
+
+       mutex_unlock(&ggtt->vm.mutex);
 
        args->aper_size = ggtt->vm.total;
        args->aper_available_size = args->aper_size - pinned;
@@ -437,15 +439,19 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
        if (ret)
                return ret;
 
-       while ((vma = list_first_entry_or_null(&obj->vma_list,
-                                              struct i915_vma,
-                                              obj_link))) {
+       spin_lock(&obj->vma.lock);
+       while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
+                                                      struct i915_vma,
+                                                      obj_link))) {
                list_move_tail(&vma->obj_link, &still_in_list);
+               spin_unlock(&obj->vma.lock);
+
                ret = i915_vma_unbind(vma);
-               if (ret)
-                       break;
+
+               spin_lock(&obj->vma.lock);
        }
-       list_splice(&still_in_list, &obj->vma_list);
+       list_splice(&still_in_list, &obj->vma.list);
+       spin_unlock(&obj->vma.lock);
 
        return ret;
 }
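The rewritten unbind loop above is the classic drop-the-lock iteration: the vma list is now guarded by its own obj->vma.lock spinlock, under which the sleeping i915_vma_unbind() may not be called. Each vma is first moved to a private still_in_list (so re-scanning never revisits it), the spinlock is dropped for the blocking call, then retaken; the survivors are spliced back once the loop ends. Reassembled from the hunk for readability, the resulting body is:

	spin_lock(&obj->vma.lock);
	while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
						       struct i915_vma,
						       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		spin_unlock(&obj->vma.lock);	/* unbind may sleep */

		ret = i915_vma_unbind(vma);

		spin_lock(&obj->vma.lock);
	}
	list_splice(&still_in_list, &obj->vma.list);
	spin_unlock(&obj->vma.lock);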
@@ -655,11 +661,6 @@ i915_gem_object_wait(struct drm_i915_gem_object *obj,
                     struct intel_rps_client *rps_client)
 {
        might_sleep();
-#if IS_ENABLED(CONFIG_LOCKDEP)
-       GEM_BUG_ON(debug_locks &&
-                  !!lockdep_is_held(&obj->base.dev->struct_mutex) !=
-                  !!(flags & I915_WAIT_LOCKED));
-#endif
        GEM_BUG_ON(timeout < 0);
 
        timeout = i915_gem_object_wait_reservation(obj->resv,
@@ -711,8 +712,8 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj)
 static int
 i915_gem_create(struct drm_file *file,
                struct drm_i915_private *dev_priv,
-               uint64_t size,
-               uint32_t *handle_p)
+               u64 size,
+               u32 *handle_p)
 {
        struct drm_i915_gem_object *obj;
        int ret;
@@ -783,6 +784,8 @@ fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
 
 void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
 {
+       intel_wakeref_t wakeref;
+
        /*
         * No actual flushing is required for the GTT write domain for reads
         * from the GTT domain. Writes to it "immediately" go to main memory
@@ -809,13 +812,13 @@ void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
 
        i915_gem_chipset_flush(dev_priv);
 
-       intel_runtime_pm_get(dev_priv);
-       spin_lock_irq(&dev_priv->uncore.lock);
+       with_intel_runtime_pm(dev_priv, wakeref) {
+               spin_lock_irq(&dev_priv->uncore.lock);
 
-       POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));
+               POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));
 
-       spin_unlock_irq(&dev_priv->uncore.lock);
-       intel_runtime_pm_put(dev_priv);
+               spin_unlock_irq(&dev_priv->uncore.lock);
+       }
 }
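i915_gem_flush_ggtt_writes() shows the runtime-PM conversion that runs through this file: intel_runtime_pm_get() now returns an intel_wakeref_t cookie that must be handed back to intel_runtime_pm_put(), and with_intel_runtime_pm() scopes the pair around a block. A plausible shape for the scoping macro (a for-loop that acquires on entry and releases when the body completes; assumed, not quoted from the driver):

#define with_intel_runtime_pm(i915, wf)				\
	for ((wf) = intel_runtime_pm_get(i915); (wf);		\
	     intel_runtime_pm_put((i915), (wf)), (wf) = 0)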
 
 static void
@@ -859,58 +862,6 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
        obj->write_domain = 0;
 }
 
-static inline int
-__copy_to_user_swizzled(char __user *cpu_vaddr,
-                       const char *gpu_vaddr, int gpu_offset,
-                       int length)
-{
-       int ret, cpu_offset = 0;
-
-       while (length > 0) {
-               int cacheline_end = ALIGN(gpu_offset + 1, 64);
-               int this_length = min(cacheline_end - gpu_offset, length);
-               int swizzled_gpu_offset = gpu_offset ^ 64;
-
-               ret = __copy_to_user(cpu_vaddr + cpu_offset,
-                                    gpu_vaddr + swizzled_gpu_offset,
-                                    this_length);
-               if (ret)
-                       return ret + length;
-
-               cpu_offset += this_length;
-               gpu_offset += this_length;
-               length -= this_length;
-       }
-
-       return 0;
-}
-
-static inline int
-__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
-                         const char __user *cpu_vaddr,
-                         int length)
-{
-       int ret, cpu_offset = 0;
-
-       while (length > 0) {
-               int cacheline_end = ALIGN(gpu_offset + 1, 64);
-               int this_length = min(cacheline_end - gpu_offset, length);
-               int swizzled_gpu_offset = gpu_offset ^ 64;
-
-               ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
-                                      cpu_vaddr + cpu_offset,
-                                      this_length);
-               if (ret)
-                       return ret + length;
-
-               cpu_offset += this_length;
-               gpu_offset += this_length;
-               length -= this_length;
-       }
-
-       return 0;
-}
-
 /*
  * Pins the specified object's pages and synchronizes the object with
  * GPU accesses. Sets needs_clflush to non-zero if the caller should
@@ -1030,72 +981,23 @@ err_unpin:
        return ret;
 }
 
-static void
-shmem_clflush_swizzled_range(char *addr, unsigned long length,
-                            bool swizzled)
-{
-       if (unlikely(swizzled)) {
-               unsigned long start = (unsigned long) addr;
-               unsigned long end = (unsigned long) addr + length;
-
-               /* For swizzling simply ensure that we always flush both
-                * channels. Lame, but simple and it works. Swizzled
-                * pwrite/pread is far from a hotpath - current userspace
-                * doesn't use it at all. */
-               start = round_down(start, 128);
-               end = round_up(end, 128);
-
-               drm_clflush_virt_range((void *)start, end - start);
-       } else {
-               drm_clflush_virt_range(addr, length);
-       }
-
-}
-
-/* Only difference to the fast-path function is that this can handle bit17
- * and uses non-atomic copy and kmap functions. */
 static int
-shmem_pread_slow(struct page *page, int offset, int length,
-                char __user *user_data,
-                bool page_do_bit17_swizzling, bool needs_clflush)
+shmem_pread(struct page *page, int offset, int len, char __user *user_data,
+           bool needs_clflush)
 {
        char *vaddr;
        int ret;
 
        vaddr = kmap(page);
-       if (needs_clflush)
-               shmem_clflush_swizzled_range(vaddr + offset, length,
-                                            page_do_bit17_swizzling);
-
-       if (page_do_bit17_swizzling)
-               ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
-       else
-               ret = __copy_to_user(user_data, vaddr + offset, length);
-       kunmap(page);
 
-       return ret ? - EFAULT : 0;
-}
-
-static int
-shmem_pread(struct page *page, int offset, int length, char __user *user_data,
-           bool page_do_bit17_swizzling, bool needs_clflush)
-{
-       int ret;
+       if (needs_clflush)
+               drm_clflush_virt_range(vaddr + offset, len);
 
-       ret = -ENODEV;
-       if (!page_do_bit17_swizzling) {
-               char *vaddr = kmap_atomic(page);
+       ret = __copy_to_user(user_data, vaddr + offset, len);
 
-               if (needs_clflush)
-                       drm_clflush_virt_range(vaddr + offset, length);
-               ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
-               kunmap_atomic(vaddr);
-       }
-       if (ret == 0)
-               return 0;
+       kunmap(page);
 
-       return shmem_pread_slow(page, offset, length, user_data,
-                               page_do_bit17_swizzling, needs_clflush);
+       return ret ? -EFAULT : 0;
 }
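With bit17 swizzling handled elsewhere, the pread helper collapses from a kmap_atomic fastpath plus kmap slowpath into one straight-line version. Reassembled from the hunk above, the whole helper is now:

static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
	    bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);	/* may sleep; fine now the atomic path is gone */
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + offset, len);
	ret = __copy_to_user(user_data, vaddr + offset, len);
	kunmap(page);

	return ret ? -EFAULT : 0;
}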
 
 static int
@@ -1104,15 +1006,10 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
 {
        char __user *user_data;
        u64 remain;
-       unsigned int obj_do_bit17_swizzling;
        unsigned int needs_clflush;
        unsigned int idx, offset;
        int ret;
 
-       obj_do_bit17_swizzling = 0;
-       if (i915_gem_object_needs_bit17_swizzle(obj))
-               obj_do_bit17_swizzling = BIT(17);
-
        ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
        if (ret)
                return ret;
@@ -1130,7 +1027,6 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
                unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
 
                ret = shmem_pread(page, offset, length, user_data,
-                                 page_to_phys(page) & obj_do_bit17_swizzling,
                                  needs_clflush);
                if (ret)
                        break;
@@ -1174,6 +1070,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 {
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = &i915->ggtt;
+       intel_wakeref_t wakeref;
        struct drm_mm_node node;
        struct i915_vma *vma;
        void __user *user_data;
@@ -1184,7 +1081,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
        if (ret)
                return ret;
 
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
        vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
                                       PIN_MAPPABLE |
                                       PIN_NONFAULT |
@@ -1257,7 +1154,7 @@ out_unpin:
                i915_vma_unpin(vma);
        }
 out_unlock:
-       intel_runtime_pm_put(i915);
+       intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
 
        return ret;
@@ -1358,6 +1255,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 {
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = &i915->ggtt;
+       intel_wakeref_t wakeref;
        struct drm_mm_node node;
        struct i915_vma *vma;
        u64 remain, offset;
@@ -1376,13 +1274,14 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
                 * This easily dwarfs any performance advantage from
                 * using the cache bypass of indirect GGTT access.
                 */
-               if (!intel_runtime_pm_get_if_in_use(i915)) {
+               wakeref = intel_runtime_pm_get_if_in_use(i915);
+               if (!wakeref) {
                        ret = -EFAULT;
                        goto out_unlock;
                }
        } else {
                /* No backing pages, no fallback, we must force GGTT access */
-               intel_runtime_pm_get(i915);
+               wakeref = intel_runtime_pm_get(i915);
        }
 
        vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
@@ -1464,39 +1363,12 @@ out_unpin:
                i915_vma_unpin(vma);
        }
 out_rpm:
-       intel_runtime_pm_put(i915);
+       intel_runtime_pm_put(i915, wakeref);
 out_unlock:
        mutex_unlock(&i915->drm.struct_mutex);
        return ret;
 }
 
-static int
-shmem_pwrite_slow(struct page *page, int offset, int length,
-                 char __user *user_data,
-                 bool page_do_bit17_swizzling,
-                 bool needs_clflush_before,
-                 bool needs_clflush_after)
-{
-       char *vaddr;
-       int ret;
-
-       vaddr = kmap(page);
-       if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
-               shmem_clflush_swizzled_range(vaddr + offset, length,
-                                            page_do_bit17_swizzling);
-       if (page_do_bit17_swizzling)
-               ret = __copy_from_user_swizzled(vaddr, offset, user_data,
-                                               length);
-       else
-               ret = __copy_from_user(vaddr + offset, user_data, length);
-       if (needs_clflush_after)
-               shmem_clflush_swizzled_range(vaddr + offset, length,
-                                            page_do_bit17_swizzling);
-       kunmap(page);
-
-       return ret ? -EFAULT : 0;
-}
-
 /* Per-page copy function for the shmem pwrite fastpath.
  * Flushes invalid cachelines before writing to the target if
  * needs_clflush_before is set and flushes out any written cachelines after
@@ -1504,31 +1376,24 @@ shmem_pwrite_slow(struct page *page, int offset, int length,
  */
 static int
 shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
-            bool page_do_bit17_swizzling,
             bool needs_clflush_before,
             bool needs_clflush_after)
 {
+       char *vaddr;
        int ret;
 
-       ret = -ENODEV;
-       if (!page_do_bit17_swizzling) {
-               char *vaddr = kmap_atomic(page);
+       vaddr = kmap(page);
 
-               if (needs_clflush_before)
-                       drm_clflush_virt_range(vaddr + offset, len);
-               ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
-               if (needs_clflush_after)
-                       drm_clflush_virt_range(vaddr + offset, len);
+       if (needs_clflush_before)
+               drm_clflush_virt_range(vaddr + offset, len);
 
-               kunmap_atomic(vaddr);
-       }
-       if (ret == 0)
-               return ret;
+       ret = __copy_from_user(vaddr + offset, user_data, len);
+       if (!ret && needs_clflush_after)
+               drm_clflush_virt_range(vaddr + offset, len);
 
-       return shmem_pwrite_slow(page, offset, len, user_data,
-                                page_do_bit17_swizzling,
-                                needs_clflush_before,
-                                needs_clflush_after);
+       kunmap(page);
+
+       return ret ? -EFAULT : 0;
 }
 
 static int
@@ -1538,7 +1403,6 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        void __user *user_data;
        u64 remain;
-       unsigned int obj_do_bit17_swizzling;
        unsigned int partial_cacheline_write;
        unsigned int needs_clflush;
        unsigned int offset, idx;
@@ -1553,10 +1417,6 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
        if (ret)
                return ret;
 
-       obj_do_bit17_swizzling = 0;
-       if (i915_gem_object_needs_bit17_swizzle(obj))
-               obj_do_bit17_swizzling = BIT(17);
-
        /* If we don't overwrite a cacheline completely, we need to be
         * careful to have up-to-date data by first clflushing. Don't
         * overcomplicate things and flush the entire page.
@@ -1573,7 +1433,6 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
                unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
 
                ret = shmem_pwrite(page, offset, length, user_data,
-                                  page_to_phys(page) & obj_do_bit17_swizzling,
                                   (offset | length) & partial_cacheline_write,
                                   needs_clflush & CLFLUSH_AFTER);
                if (ret)
@@ -1677,23 +1536,21 @@ err:
 
 static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *i915;
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct list_head *list;
        struct i915_vma *vma;
 
        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
 
+       mutex_lock(&i915->ggtt.vm.mutex);
        for_each_ggtt_vma(vma, obj) {
-               if (i915_vma_is_active(vma))
-                       continue;
-
                if (!drm_mm_node_allocated(&vma->node))
                        continue;
 
-               list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+               list_move_tail(&vma->vm_link, &vma->vm->bound_list);
        }
+       mutex_unlock(&i915->ggtt.vm.mutex);
 
-       i915 = to_i915(obj->base.dev);
        spin_lock(&i915->mm.obj_lock);
        list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
        list_move_tail(&obj->mm.link, list);
@@ -1713,8 +1570,8 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 {
        struct drm_i915_gem_set_domain *args = data;
        struct drm_i915_gem_object *obj;
-       uint32_t read_domains = args->read_domains;
-       uint32_t write_domain = args->write_domain;
+       u32 read_domains = args->read_domains;
+       u32 write_domain = args->write_domain;
        int err;
 
        /* Only handle setting domains to types used by the CPU. */
@@ -1824,6 +1681,16 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
        return 0;
 }
 
+static inline bool
+__vma_matches(struct vm_area_struct *vma, struct file *filp,
+             unsigned long addr, unsigned long size)
+{
+       if (vma->vm_file != filp)
+               return false;
+
+       return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size;
+}
+
 /**
  * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
  *                      it is mapped to.
@@ -1873,6 +1740,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
        addr = vm_mmap(obj->base.filp, 0, args->size,
                       PROT_READ | PROT_WRITE, MAP_SHARED,
                       args->offset);
+       if (IS_ERR_VALUE(addr))
+               goto err;
+
        if (args->flags & I915_MMAP_WC) {
                struct mm_struct *mm = current->mm;
                struct vm_area_struct *vma;
@@ -1882,23 +1752,28 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                        return -EINTR;
                }
                vma = find_vma(mm, addr);
-               if (vma)
+               if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
                        vma->vm_page_prot =
                                pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
                else
                        addr = -ENOMEM;
                up_write(&mm->mmap_sem);
+               if (IS_ERR_VALUE(addr))
+                       goto err;
 
                /* This may race, but that's ok, it only gets set */
                WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
        }
        i915_gem_object_put(obj);
-       if (IS_ERR((void *)addr))
-               return addr;
 
-       args->addr_ptr = (uint64_t) addr;
+       args->addr_ptr = (u64)addr;
 
        return 0;
+
+err:
+       i915_gem_object_put(obj);
+
+       return addr;
 }
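The mmap changes fix two things at once: errors from vm_mmap() and from the write-combining path now funnel through a single err: exit that drops the object reference (previously leaked on failure), and __vma_matches() closes a TOCTOU race in the I915_MMAP_WC path. Between vm_mmap() returning and mmap_sem being retaken, userspace may have unmapped the range and mapped something else at the same address, so find_vma() alone does not prove the vma is ours. A hedged restatement of the guard (helper name hypothetical):

/* Only trust find_vma() if it still names the mapping we created. */
static bool still_our_mapping(struct mm_struct *mm, struct file *filp,
			      unsigned long addr, unsigned long size)
{
	struct vm_area_struct *vma = find_vma(mm, addr);

	return vma && __vma_matches(vma, filp, addr, size);
}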
 
 static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
@@ -2009,6 +1884,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        bool write = area->vm_flags & VM_WRITE;
+       intel_wakeref_t wakeref;
        struct i915_vma *vma;
        pgoff_t page_offset;
        int ret;
@@ -2038,7 +1914,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
        if (ret)
                goto err;
 
-       intel_runtime_pm_get(dev_priv);
+       wakeref = intel_runtime_pm_get(dev_priv);
 
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
@@ -2116,7 +1992,7 @@ err_unpin:
 err_unlock:
        mutex_unlock(&dev->struct_mutex);
 err_rpm:
-       intel_runtime_pm_put(dev_priv);
+       intel_runtime_pm_put(dev_priv, wakeref);
        i915_gem_object_unpin_pages(obj);
 err:
        switch (ret) {
@@ -2189,6 +2065,7 @@ void
 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
+       intel_wakeref_t wakeref;
 
        /* Serialisation between user GTT access and our code depends upon
         * revoking the CPU's PTE whilst the mutex is held. The next user
@@ -2199,7 +2076,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
         * wakeref.
         */
        lockdep_assert_held(&i915->drm.struct_mutex);
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
        if (!obj->userfault_count)
                goto out;
@@ -2216,7 +2093,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
        wmb();
 
 out:
-       intel_runtime_pm_put(i915);
+       intel_runtime_pm_put(i915, wakeref);
 }
 
 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
@@ -2296,8 +2173,8 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
 int
 i915_gem_mmap_gtt(struct drm_file *file,
                  struct drm_device *dev,
-                 uint32_t handle,
-                 uint64_t *offset)
+                 u32 handle,
+                 u64 *offset)
 {
        struct drm_i915_gem_object *obj;
        int ret;
@@ -2444,8 +2321,8 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
        struct sg_table *pages;
 
        pages = fetch_and_zero(&obj->mm.pages);
-       if (!pages)
-               return NULL;
+       if (IS_ERR_OR_NULL(pages))
+               return pages;
 
        spin_lock(&i915->mm.obj_lock);
        list_del(&obj->mm.link);
@@ -2469,22 +2346,23 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
        return pages;
 }
 
-void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
-                                enum i915_mm_subclass subclass)
+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+                               enum i915_mm_subclass subclass)
 {
        struct sg_table *pages;
+       int ret;
 
        if (i915_gem_object_has_pinned_pages(obj))
-               return;
+               return -EBUSY;
 
        GEM_BUG_ON(obj->bind_count);
-       if (!i915_gem_object_has_pages(obj))
-               return;
 
        /* May be called by shrinker from within get_pages() (on another bo) */
        mutex_lock_nested(&obj->mm.lock, subclass);
-       if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
+       if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
+               ret = -EBUSY;
                goto unlock;
+       }
 
        /*
         * ->put_pages might need to allocate memory for the bit17 swizzle
@@ -2492,11 +2370,24 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
         * lists early.
         */
        pages = __i915_gem_object_unset_pages(obj);
+
+       /*
+        * XXX Temporary hijinx to avoid updating all backends to handle
+        * NULL pages. In the future, when we have more asynchronous
+        * get_pages backends we should be better able to handle the
+        * cancellation of the async task in a more uniform manner.
+        */
+       if (!pages && !i915_gem_object_needs_async_cancel(obj))
+               pages = ERR_PTR(-EINVAL);
+
        if (!IS_ERR(pages))
                obj->ops->put_pages(obj, pages);
 
+       ret = 0;
 unlock:
        mutex_unlock(&obj->mm.lock);
+
+       return ret;
 }
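__i915_gem_object_put_pages() changes from void to int so callers can tell "pages released" apart from "object still pinned" (-EBUSY), and __i915_gem_object_unset_pages() may now legitimately hand back an ERR_PTR for a cancelled async get_pages. A hypothetical shrinker-style caller using the new return value:

/* Hypothetical caller: skip objects whose pages could not be dropped. */
static bool example_try_release(struct drm_i915_gem_object *obj)
{
	return __i915_gem_object_put_pages(obj, I915_MM_SHRINKER) == 0;
}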
 
 bool i915_sg_trim(struct sg_table *orig_st)
@@ -3000,59 +2891,12 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
        return 0;
 }
 
-static void i915_gem_client_mark_guilty(struct drm_i915_file_private *file_priv,
-                                       const struct i915_gem_context *ctx)
+static bool match_ring(struct i915_request *rq)
 {
-       unsigned int score;
-       unsigned long prev_hang;
+       struct drm_i915_private *dev_priv = rq->i915;
+       u32 ring = I915_READ(RING_START(rq->engine->mmio_base));
 
-       if (i915_gem_context_is_banned(ctx))
-               score = I915_CLIENT_SCORE_CONTEXT_BAN;
-       else
-               score = 0;
-
-       prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
-       if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
-               score += I915_CLIENT_SCORE_HANG_FAST;
-
-       if (score) {
-               atomic_add(score, &file_priv->ban_score);
-
-               DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
-                                ctx->name, score,
-                                atomic_read(&file_priv->ban_score));
-       }
-}
-
-static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
-{
-       unsigned int score;
-       bool banned, bannable;
-
-       atomic_inc(&ctx->guilty_count);
-
-       bannable = i915_gem_context_is_bannable(ctx);
-       score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
-       banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
-
-       /* Cool contexts don't accumulate client ban score */
-       if (!bannable)
-               return;
-
-       if (banned) {
-               DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, banned\n",
-                                ctx->name, atomic_read(&ctx->guilty_count),
-                                score);
-               i915_gem_context_set_banned(ctx);
-       }
-
-       if (!IS_ERR_OR_NULL(ctx->file_priv))
-               i915_gem_client_mark_guilty(ctx->file_priv, ctx);
-}
-
-static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
-{
-       atomic_inc(&ctx->active_count);
+       return ring == i915_ggtt_offset(rq->ring->vma);
 }
 
 struct i915_request *
@@ -3074,9 +2918,16 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
         */
        spin_lock_irqsave(&engine->timeline.lock, flags);
        list_for_each_entry(request, &engine->timeline.requests, link) {
-               if (__i915_request_completed(request, request->global_seqno))
+               if (i915_request_completed(request))
                        continue;
 
+               if (!i915_request_started(request))
+                       break;
+
+               /* More than one preemptible request may match! */
+               if (!match_ring(request))
+                       break;
+
                active = request;
                break;
        }
@@ -3085,366 +2936,6 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
        return active;
 }
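With the global-seqno bookkeeping gone, the active-request search applies three checks while walking the timeline in submission order: completed requests are skipped; the walk stops at the first request that has not started, since nothing later can be executing; and match_ring() confirms the CS is actually fetching from that request's ring, because more than one preemptible request may otherwise qualify. Condensed into a single predicate (illustrative name, ignoring the ordered-walk early-outs):

static bool is_hang_candidate(struct i915_request *rq)
{
	return i915_request_started(rq) &&
	       !i915_request_completed(rq) &&
	       match_ring(rq);
}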
 
-/*
- * Ensure irq handler finishes, and not run again.
- * Also return the active request so that we only search for it once.
- */
-struct i915_request *
-i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
-{
-       struct i915_request *request;
-
-       /*
-        * During the reset sequence, we must prevent the engine from
-        * entering RC6. As the context state is undefined until we restart
-        * the engine, if it does enter RC6 during the reset, the state
-        * written to the powercontext is undefined and so we may lose
-        * GPU state upon resume, i.e. fail to restart after a reset.
-        */
-       intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
-
-       request = engine->reset.prepare(engine);
-       if (request && request->fence.error == -EIO)
-               request = ERR_PTR(-EIO); /* Previous reset failed! */
-
-       return request;
-}
-
-int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
-{
-       struct intel_engine_cs *engine;
-       struct i915_request *request;
-       enum intel_engine_id id;
-       int err = 0;
-
-       for_each_engine(engine, dev_priv, id) {
-               request = i915_gem_reset_prepare_engine(engine);
-               if (IS_ERR(request)) {
-                       err = PTR_ERR(request);
-                       continue;
-               }
-
-               engine->hangcheck.active_request = request;
-       }
-
-       i915_gem_revoke_fences(dev_priv);
-       intel_uc_sanitize(dev_priv);
-
-       return err;
-}
-
-static void engine_skip_context(struct i915_request *request)
-{
-       struct intel_engine_cs *engine = request->engine;
-       struct i915_gem_context *hung_ctx = request->gem_context;
-       struct i915_timeline *timeline = request->timeline;
-       unsigned long flags;
-
-       GEM_BUG_ON(timeline == &engine->timeline);
-
-       spin_lock_irqsave(&engine->timeline.lock, flags);
-       spin_lock(&timeline->lock);
-
-       list_for_each_entry_continue(request, &engine->timeline.requests, link)
-               if (request->gem_context == hung_ctx)
-                       i915_request_skip(request, -EIO);
-
-       list_for_each_entry(request, &timeline->requests, link)
-               i915_request_skip(request, -EIO);
-
-       spin_unlock(&timeline->lock);
-       spin_unlock_irqrestore(&engine->timeline.lock, flags);
-}
-
-/* Returns the request if it was guilty of the hang */
-static struct i915_request *
-i915_gem_reset_request(struct intel_engine_cs *engine,
-                      struct i915_request *request,
-                      bool stalled)
-{
-       /* The guilty request will get skipped on a hung engine.
-        *
-        * Users of client default contexts do not rely on logical
-        * state preserved between batches so it is safe to execute
-        * queued requests following the hang. Non default contexts
-        * rely on preserved state, so skipping a batch loses the
-        * evolution of the state and it needs to be considered corrupted.
-        * Executing more queued batches on top of corrupted state is
-        * risky. But we take the risk by trying to advance through
-        * the queued requests in order to make the client behaviour
-        * more predictable around resets, by not throwing away a random
-        * amount of batches it has prepared for execution. Sophisticated
-        * clients can use gem_reset_stats_ioctl and dma fence status
-        * (exported via sync_file info ioctl on explicit fences) to observe
-        * when it loses the context state and should rebuild accordingly.
-        *
-        * The context ban, and ultimately the client ban, mechanism are safety
-        * valves if client submission ends up resulting in nothing more than
-        * subsequent hangs.
-        */
-
-       if (i915_request_completed(request)) {
-               GEM_TRACE("%s pardoned global=%d (fence %llx:%lld), current %d\n",
-                         engine->name, request->global_seqno,
-                         request->fence.context, request->fence.seqno,
-                         intel_engine_get_seqno(engine));
-               stalled = false;
-       }
-
-       if (stalled) {
-               i915_gem_context_mark_guilty(request->gem_context);
-               i915_request_skip(request, -EIO);
-
-               /* If this context is now banned, skip all pending requests. */
-               if (i915_gem_context_is_banned(request->gem_context))
-                       engine_skip_context(request);
-       } else {
-               /*
-                * Since this is not the hung engine, it may have advanced
-                * since the hang declaration. Double check by refinding
-                * the active request at the time of the reset.
-                */
-               request = i915_gem_find_active_request(engine);
-               if (request) {
-                       unsigned long flags;
-
-                       i915_gem_context_mark_innocent(request->gem_context);
-                       dma_fence_set_error(&request->fence, -EAGAIN);
-
-                       /* Rewind the engine to replay the incomplete rq */
-                       spin_lock_irqsave(&engine->timeline.lock, flags);
-                       request = list_prev_entry(request, link);
-                       if (&request->link == &engine->timeline.requests)
-                               request = NULL;
-                       spin_unlock_irqrestore(&engine->timeline.lock, flags);
-               }
-       }
-
-       return request;
-}
-
-void i915_gem_reset_engine(struct intel_engine_cs *engine,
-                          struct i915_request *request,
-                          bool stalled)
-{
-       /*
-        * Make sure this write is visible before we re-enable the interrupt
-        * handlers on another CPU, as tasklet_enable() resolves to just
-        * a compiler barrier which is insufficient for our purpose here.
-        */
-       smp_store_mb(engine->irq_posted, 0);
-
-       if (request)
-               request = i915_gem_reset_request(engine, request, stalled);
-
-       /* Setup the CS to resume from the breadcrumb of the hung request */
-       engine->reset.reset(engine, request);
-}
-
-void i915_gem_reset(struct drm_i915_private *dev_priv,
-                   unsigned int stalled_mask)
-{
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
-
-       lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
-       i915_retire_requests(dev_priv);
-
-       for_each_engine(engine, dev_priv, id) {
-               struct intel_context *ce;
-
-               i915_gem_reset_engine(engine,
-                                     engine->hangcheck.active_request,
-                                     stalled_mask & ENGINE_MASK(id));
-               ce = fetch_and_zero(&engine->last_retired_context);
-               if (ce)
-                       intel_context_unpin(ce);
-
-               /*
-                * Ostensibly, we always want a context loaded for powersaving,
-                * so if the engine is idle after the reset, send a request
-                * to load our scratch kernel_context.
-                *
-                * More mysteriously, if we leave the engine idle after a reset,
-                * the next userspace batch may hang, with what appears to be
-                * an incoherent read by the CS (presumably stale TLB). An
-                * empty request appears sufficient to paper over the glitch.
-                */
-               if (intel_engine_is_idle(engine)) {
-                       struct i915_request *rq;
-
-                       rq = i915_request_alloc(engine,
-                                               dev_priv->kernel_context);
-                       if (!IS_ERR(rq))
-                               i915_request_add(rq);
-               }
-       }
-
-       i915_gem_restore_fences(dev_priv);
-}
-
-void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
-{
-       engine->reset.finish(engine);
-
-       intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
-}
-
-void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
-{
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
-
-       lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
-       for_each_engine(engine, dev_priv, id) {
-               engine->hangcheck.active_request = NULL;
-               i915_gem_reset_finish_engine(engine);
-       }
-}
-
-static void nop_submit_request(struct i915_request *request)
-{
-       unsigned long flags;
-
-       GEM_TRACE("%s fence %llx:%lld -> -EIO\n",
-                 request->engine->name,
-                 request->fence.context, request->fence.seqno);
-       dma_fence_set_error(&request->fence, -EIO);
-
-       spin_lock_irqsave(&request->engine->timeline.lock, flags);
-       __i915_request_submit(request);
-       intel_engine_init_global_seqno(request->engine, request->global_seqno);
-       spin_unlock_irqrestore(&request->engine->timeline.lock, flags);
-}
-
-void i915_gem_set_wedged(struct drm_i915_private *i915)
-{
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
-
-       GEM_TRACE("start\n");
-
-       if (GEM_SHOW_DEBUG()) {
-               struct drm_printer p = drm_debug_printer(__func__);
-
-               for_each_engine(engine, i915, id)
-                       intel_engine_dump(engine, &p, "%s\n", engine->name);
-       }
-
-       if (test_and_set_bit(I915_WEDGED, &i915->gpu_error.flags))
-               goto out;
-
-       /*
-        * First, stop submission to hw, but do not yet complete requests by
-        * rolling the global seqno forward (since this would complete requests
-        * for which we haven't set the fence error to EIO yet).
-        */
-       for_each_engine(engine, i915, id)
-               i915_gem_reset_prepare_engine(engine);
-
-       /* Even if the GPU reset fails, it should still stop the engines */
-       if (INTEL_GEN(i915) >= 5)
-               intel_gpu_reset(i915, ALL_ENGINES);
-
-       for_each_engine(engine, i915, id) {
-               engine->submit_request = nop_submit_request;
-               engine->schedule = NULL;
-       }
-       i915->caps.scheduler = 0;
-
-       /*
-        * Make sure no request can slip through without getting completed by
-        * either this call here to intel_engine_init_global_seqno, or the one
-        * in nop_submit_request.
-        */
-       synchronize_rcu();
-
-       /* Mark all executing requests as skipped */
-       for_each_engine(engine, i915, id)
-               engine->cancel_requests(engine);
-
-       for_each_engine(engine, i915, id) {
-               i915_gem_reset_finish_engine(engine);
-               intel_engine_wakeup(engine);
-       }
-
-out:
-       GEM_TRACE("end\n");
-
-       wake_up_all(&i915->gpu_error.reset_queue);
-}
-
-bool i915_gem_unset_wedged(struct drm_i915_private *i915)
-{
-       struct i915_timeline *tl;
-
-       lockdep_assert_held(&i915->drm.struct_mutex);
-       if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
-               return true;
-
-       GEM_TRACE("start\n");
-
-       /*
-        * Before unwedging, make sure that all pending operations
-        * are flushed and errored out - we may have requests waiting upon
-        * third party fences. We marked all inflight requests as EIO, and
-        * every execbuf since returned EIO, for consistency we want all
-        * the currently pending requests to also be marked as EIO, which
-        * is done inside our nop_submit_request - and so we must wait.
-        *
-        * No more can be submitted until we reset the wedged bit.
-        */
-       list_for_each_entry(tl, &i915->gt.timelines, link) {
-               struct i915_request *rq;
-
-               rq = i915_gem_active_peek(&tl->last_request,
-                                         &i915->drm.struct_mutex);
-               if (!rq)
-                       continue;
-
-               /*
-                * We can't use our normal waiter as we want to
-                * avoid recursively trying to handle the current
-                * reset. The basic dma_fence_default_wait() installs
-                * a callback for dma_fence_signal(), which is
-                * triggered by our nop handler (indirectly, the
-                * callback enables the signaler thread which is
-                * woken by the nop_submit_request() advancing the seqno
-                * and when the seqno passes the fence, the signaler
-                * then signals the fence waking us up).
-                */
-               if (dma_fence_default_wait(&rq->fence, true,
-                                          MAX_SCHEDULE_TIMEOUT) < 0)
-                       return false;
-       }
-       i915_retire_requests(i915);
-       GEM_BUG_ON(i915->gt.active_requests);
-
-       if (!intel_gpu_reset(i915, ALL_ENGINES))
-               intel_engines_sanitize(i915);
-
-       /*
-        * Undo nop_submit_request. We prevent all new i915 requests from
-        * being queued (by disallowing execbuf whilst wedged) so having
-        * waited for all active requests above, we know the system is idle
-        * and do not have to worry about a thread being inside
-        * engine->submit_request() as we swap over. So unlike installing
-        * the nop_submit_request on reset, we can do this from normal
-        * context and do not require stop_machine().
-        */
-       intel_engines_reset_default_submission(i915);
-       i915_gem_contexts_lost(i915);
-
-       GEM_TRACE("end\n");
-
-       smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
-       clear_bit(I915_WEDGED, &i915->gpu_error.flags);
-
-       return true;
-}
-
 static void
 i915_gem_retire_work_handler(struct work_struct *work)
 {
@@ -3547,7 +3038,7 @@ static void assert_kernel_context_is_current(struct drm_i915_private *i915)
 
        GEM_BUG_ON(i915->gt.active_requests);
        for_each_engine(engine, i915, id) {
-               GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request));
+               GEM_BUG_ON(__i915_active_request_peek(&engine->timeline.last_request));
                GEM_BUG_ON(engine->last_retired_context !=
                           to_intel_context(i915->kernel_context, engine));
        }
@@ -3766,33 +3257,6 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        return ret;
 }
 
-static long wait_for_timeline(struct i915_timeline *tl,
-                             unsigned int flags, long timeout)
-{
-       struct i915_request *rq;
-
-       rq = i915_gem_active_get_unlocked(&tl->last_request);
-       if (!rq)
-               return timeout;
-
-       /*
-        * "Race-to-idle".
-        *
-        * Switching to the kernel context is often used as a synchronous
-        * step prior to idling, e.g. in suspend for flushing all
-        * current operations to memory before sleeping. These we
-        * want to complete as quickly as possible to avoid prolonged
-        * stalls, so allow the gpu to boost to maximum clocks.
-        */
-       if (flags & I915_WAIT_FOR_IDLE_BOOST)
-               gen6_rps_boost(rq, NULL);
-
-       timeout = i915_request_wait(rq, flags, timeout);
-       i915_request_put(rq);
-
-       return timeout;
-}
-
 static int wait_for_engines(struct drm_i915_private *i915)
 {
        if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
@@ -3806,6 +3270,52 @@ static int wait_for_engines(struct drm_i915_private *i915)
        return 0;
 }
 
+static long
+wait_for_timelines(struct drm_i915_private *i915,
+                  unsigned int flags, long timeout)
+{
+       struct i915_gt_timelines *gt = &i915->gt.timelines;
+       struct i915_timeline *tl;
+
+       if (!READ_ONCE(i915->gt.active_requests))
+               return timeout;
+
+       mutex_lock(&gt->mutex);
+       list_for_each_entry(tl, &gt->active_list, link) {
+               struct i915_request *rq;
+
+               rq = i915_active_request_get_unlocked(&tl->last_request);
+               if (!rq)
+                       continue;
+
+               mutex_unlock(&gt->mutex);
+
+               /*
+                * "Race-to-idle".
+                *
+                * Switching to the kernel context is often used as a synchronous
+                * step prior to idling, e.g. in suspend for flushing all
+                * current operations to memory before sleeping. These we
+                * want to complete as quickly as possible to avoid prolonged
+                * stalls, so allow the gpu to boost to maximum clocks.
+                */
+               if (flags & I915_WAIT_FOR_IDLE_BOOST)
+                       gen6_rps_boost(rq, NULL);
+
+               timeout = i915_request_wait(rq, flags, timeout);
+               i915_request_put(rq);
+               if (timeout < 0)
+                       return timeout;
+
+               /* restart after reacquiring the lock */
+               mutex_lock(&gt->mutex);
+               tl = list_entry(&gt->active_list, typeof(*tl), link);
+       }
+       mutex_unlock(&gt->mutex);
+
+       return timeout;
+}
+
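
wait_for_timelines() above illustrates a recurring pattern: drop the lock around a potentially long wait, then restart the walk from the list head because the list may have mutated while unlocked. A minimal userspace sketch of that control flow, assuming pthreads; wait_done() is a hypothetical stand-in for i915_request_wait(), and the reference the driver takes on the request before unlocking is elided:

    #include <pthread.h>

    struct node { struct node *next; int busy; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *head;

    /* Hypothetical stand-in for i915_request_wait(): sleeps until the
     * tracked work is done, then marks the node idle. */
    static void wait_done(struct node *n)
    {
            n->busy = 0;
    }

    /* Wait for every busy node, sleeping outside the lock. The walk
     * restarts from the head after each wait, since neighbours may have
     * been unlinked while the lock was dropped. */
    static void drain(void)
    {
            struct node *n;

            pthread_mutex_lock(&lock);
    restart:
            for (n = head; n; n = n->next) {
                    if (!n->busy)
                            continue;

                    pthread_mutex_unlock(&lock);
                    wait_done(n);           /* may block for a long time */
                    pthread_mutex_lock(&lock);
                    goto restart;           /* iterator may now be stale */
            }
            pthread_mutex_unlock(&lock);
    }
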
 int i915_gem_wait_for_idle(struct drm_i915_private *i915,
                           unsigned int flags, long timeout)
 {
@@ -3817,17 +3327,15 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915,
        if (!READ_ONCE(i915->gt.awake))
                return 0;
 
+       timeout = wait_for_timelines(i915, flags, timeout);
+       if (timeout < 0)
+               return timeout;
+
        if (flags & I915_WAIT_LOCKED) {
-               struct i915_timeline *tl;
                int err;
 
                lockdep_assert_held(&i915->drm.struct_mutex);
 
-               list_for_each_entry(tl, &i915->gt.timelines, link) {
-                       timeout = wait_for_timeline(tl, flags, timeout);
-                       if (timeout < 0)
-                               return timeout;
-               }
                if (GEM_SHOW_DEBUG() && !timeout) {
                        /* Presume that timeout was non-zero to begin with! */
                        dev_warn(&i915->drm.pdev->dev,
@@ -3841,17 +3349,6 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915,
 
                i915_retire_requests(i915);
                GEM_BUG_ON(i915->gt.active_requests);
-       } else {
-               struct intel_engine_cs *engine;
-               enum intel_engine_id id;
-
-               for_each_engine(engine, i915, id) {
-                       struct i915_timeline *tl = &engine->timeline;
-
-                       timeout = wait_for_timeline(tl, flags, timeout);
-                       if (timeout < 0)
-                               return timeout;
-               }
        }
 
        return 0;
@@ -4037,7 +3534,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
         * reading an invalid PTE on older architectures.
         */
 restart:
-       list_for_each_entry(vma, &obj->vma_list, obj_link) {
+       list_for_each_entry(vma, &obj->vma.list, obj_link) {
                if (!drm_mm_node_allocated(&vma->node))
                        continue;
 
@@ -4115,7 +3612,7 @@ restart:
                         */
                }
 
-               list_for_each_entry(vma, &obj->vma_list, obj_link) {
+               list_for_each_entry(vma, &obj->vma.list, obj_link) {
                        if (!drm_mm_node_allocated(&vma->node))
                                continue;
 
@@ -4125,7 +3622,7 @@ restart:
                }
        }
 
-       list_for_each_entry(vma, &obj->vma_list, obj_link)
+       list_for_each_entry(vma, &obj->vma.list, obj_link)
                vma->node.color = cache_level;
        i915_gem_object_set_cache_coherency(obj, cache_level);
        obj->cache_dirty = true; /* Always invalidate stale cachelines */
@@ -4688,7 +4185,8 @@ out:
 }
 
 static void
-frontbuffer_retire(struct i915_gem_active *active, struct i915_request *request)
+frontbuffer_retire(struct i915_active_request *active,
+                  struct i915_request *request)
 {
        struct drm_i915_gem_object *obj =
                container_of(active, typeof(*obj), frontbuffer_write);
@@ -4701,7 +4199,9 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 {
        mutex_init(&obj->mm.lock);
 
-       INIT_LIST_HEAD(&obj->vma_list);
+       spin_lock_init(&obj->vma.lock);
+       INIT_LIST_HEAD(&obj->vma.list);
+
        INIT_LIST_HEAD(&obj->lut_list);
        INIT_LIST_HEAD(&obj->batch_pool_link);
 
@@ -4713,7 +4213,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
        obj->resv = &obj->__builtin_resv;
 
        obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
-       init_request_active(&obj->frontbuffer_write, frontbuffer_retire);
+       i915_active_request_init(&obj->frontbuffer_write,
+                                NULL, frontbuffer_retire);
 
        obj->mm.madv = I915_MADV_WILLNEED;
        INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
@@ -4856,8 +4357,9 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
                                    struct llist_node *freed)
 {
        struct drm_i915_gem_object *obj, *on;
+       intel_wakeref_t wakeref;
 
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
        llist_for_each_entry_safe(obj, on, freed, freed) {
                struct i915_vma *vma, *vn;
 
@@ -4866,14 +4368,13 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
                mutex_lock(&i915->drm.struct_mutex);
 
                GEM_BUG_ON(i915_gem_object_is_active(obj));
-               list_for_each_entry_safe(vma, vn,
-                                        &obj->vma_list, obj_link) {
+               list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) {
                        GEM_BUG_ON(i915_vma_is_active(vma));
                        vma->flags &= ~I915_VMA_PIN_MASK;
                        i915_vma_destroy(vma);
                }
-               GEM_BUG_ON(!list_empty(&obj->vma_list));
-               GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
+               GEM_BUG_ON(!list_empty(&obj->vma.list));
+               GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree));
 
                /* This serializes freeing with the shrinker. Since the free
                 * is delayed, first by RCU then by the workqueue, we want the
@@ -4918,7 +4419,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
                if (on)
                        cond_resched();
        }
-       intel_runtime_pm_put(i915);
+       intel_runtime_pm_put(i915, wakeref);
 }
 
 static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
@@ -5027,13 +4528,11 @@ void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
 
 void i915_gem_sanitize(struct drm_i915_private *i915)
 {
-       int err;
+       intel_wakeref_t wakeref;
 
        GEM_TRACE("\n");
 
-       mutex_lock(&i915->drm.struct_mutex);
-
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
        intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
 
        /*
@@ -5053,28 +4552,28 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
         * it may impact the display and we are uncertain about the stability
         * of the reset, so this could be applied to even earlier gen.
         */
-       err = -ENODEV;
-       if (INTEL_GEN(i915) >= 5 && intel_has_gpu_reset(i915))
-               err = WARN_ON(intel_gpu_reset(i915, ALL_ENGINES));
-       if (!err)
-               intel_engines_sanitize(i915);
+       intel_engines_sanitize(i915, false);
 
        intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
-       intel_runtime_pm_put(i915);
+       intel_runtime_pm_put(i915, wakeref);
 
+       mutex_lock(&i915->drm.struct_mutex);
        i915_gem_contexts_lost(i915);
        mutex_unlock(&i915->drm.struct_mutex);
 }
 
 int i915_gem_suspend(struct drm_i915_private *i915)
 {
+       intel_wakeref_t wakeref;
        int ret;
 
        GEM_TRACE("\n");
 
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
        intel_suspend_gt_powersave(i915);
 
+       flush_workqueue(i915->wq);
+
        mutex_lock(&i915->drm.struct_mutex);
 
        /*
@@ -5104,11 +4603,9 @@ int i915_gem_suspend(struct drm_i915_private *i915)
        i915_retire_requests(i915); /* ensure we flush after wedging */
 
        mutex_unlock(&i915->drm.struct_mutex);
+       i915_reset_flush(i915);
 
-       intel_uc_suspend(i915);
-
-       cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
-       cancel_delayed_work_sync(&i915->gt.retire_work);
+       drain_delayed_work(&i915->gt.retire_work);
 
        /*
         * As the idle_work is rearming if it detects a race, play safe and
@@ -5116,6 +4613,8 @@ int i915_gem_suspend(struct drm_i915_private *i915)
         */
        drain_delayed_work(&i915->gt.idle_work);
 
+       intel_uc_suspend(i915);
+
        /*
         * Assert that we successfully flushed all the work and
         * reset the GPU back to its idle, low power state.
@@ -5124,12 +4623,12 @@ int i915_gem_suspend(struct drm_i915_private *i915)
        if (WARN_ON(!intel_engines_are_idle(i915)))
                i915_gem_set_wedged(i915); /* no hope, discard everything */
 
-       intel_runtime_pm_put(i915);
+       intel_runtime_pm_put(i915, wakeref);
        return 0;
 
 err_unlock:
        mutex_unlock(&i915->drm.struct_mutex);
-       intel_runtime_pm_put(i915);
+       intel_runtime_pm_put(i915, wakeref);
        return ret;
 }
 
@@ -5223,15 +4722,15 @@ void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
        I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
                                 DISP_TILE_SURFACE_SWIZZLING);
 
-       if (IS_GEN5(dev_priv))
+       if (IS_GEN(dev_priv, 5))
                return;
 
        I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
-       if (IS_GEN6(dev_priv))
+       if (IS_GEN(dev_priv, 6))
                I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
-       else if (IS_GEN7(dev_priv))
+       else if (IS_GEN(dev_priv, 7))
                I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
-       else if (IS_GEN8(dev_priv))
+       else if (IS_GEN(dev_priv, 8))
                I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
        else
                BUG();
@@ -5253,10 +4752,10 @@ static void init_unused_rings(struct drm_i915_private *dev_priv)
                init_unused_ring(dev_priv, SRB1_BASE);
                init_unused_ring(dev_priv, SRB2_BASE);
                init_unused_ring(dev_priv, SRB3_BASE);
-       } else if (IS_GEN2(dev_priv)) {
+       } else if (IS_GEN(dev_priv, 2)) {
                init_unused_ring(dev_priv, SRB0_BASE);
                init_unused_ring(dev_priv, SRB1_BASE);
-       } else if (IS_GEN3(dev_priv)) {
+       } else if (IS_GEN(dev_priv, 3)) {
                init_unused_ring(dev_priv, PRB1_BASE);
                init_unused_ring(dev_priv, PRB2_BASE);
        }
@@ -5552,6 +5051,8 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
                dev_priv->gt.cleanup_engine = intel_engine_cleanup;
        }
 
+       i915_timelines_init(dev_priv);
+
        ret = i915_gem_init_userptr(dev_priv);
        if (ret)
                return ret;
@@ -5580,7 +5081,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
        }
 
        ret = i915_gem_init_scratch(dev_priv,
-                                   IS_GEN2(dev_priv) ? SZ_256K : PAGE_SIZE);
+                                   IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
        if (ret) {
                GEM_BUG_ON(ret == -EIO);
                goto err_ggtt;
@@ -5674,8 +5175,10 @@ err_unlock:
 err_uc_misc:
        intel_uc_fini_misc(dev_priv);
 
-       if (ret != -EIO)
+       if (ret != -EIO) {
                i915_gem_cleanup_userptr(dev_priv);
+               i915_timelines_fini(dev_priv);
+       }
 
        if (ret == -EIO) {
                mutex_lock(&dev_priv->drm.struct_mutex);
@@ -5726,6 +5229,7 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
 
        intel_uc_fini_misc(dev_priv);
        i915_gem_cleanup_userptr(dev_priv);
+       i915_timelines_fini(dev_priv);
 
        i915_gem_drain_freed_objects(dev_priv);
 
@@ -5828,7 +5332,6 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
        if (!dev_priv->priorities)
                goto err_dependencies;
 
-       INIT_LIST_HEAD(&dev_priv->gt.timelines);
        INIT_LIST_HEAD(&dev_priv->gt.active_rings);
        INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
 
@@ -5840,6 +5343,7 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
                          i915_gem_idle_work_handler);
        init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
        init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
+       mutex_init(&dev_priv->gpu_error.wedge_mutex);
 
        atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
 
@@ -5871,7 +5375,6 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
        GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
        GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
        WARN_ON(dev_priv->mm.object_count);
-       WARN_ON(!list_empty(&dev_priv->gt.timelines));
 
        kmem_cache_destroy(dev_priv->priorities);
        kmem_cache_destroy(dev_priv->dependencies);
index 4ec386950f75d72e1337a7f93f986b514caad875..280813a4bf82a6fdd223c1b6f8dcbcdbe2980e12 100644 (file)
  */
 
 #include <linux/log2.h>
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 #include "i915_trace.h"
+#include "intel_lrc_reg.h"
 #include "intel_workarounds.h"
 
 #define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
@@ -311,7 +311,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
                address_mode = INTEL_LEGACY_64B_CONTEXT;
        desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
 
-       if (IS_GEN8(i915))
+       if (IS_GEN(i915, 8))
                desc |= GEN8_CTX_L3LLC_COHERENT;
 
        /* TODO: WaDisableLiteRestore when we start using semaphore
@@ -322,6 +322,32 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
        return desc;
 }
 
+static void intel_context_retire(struct i915_active_request *active,
+                                struct i915_request *rq)
+{
+       struct intel_context *ce =
+               container_of(active, typeof(*ce), active_tracker);
+
+       intel_context_unpin(ce);
+}
+
+void
+intel_context_init(struct intel_context *ce,
+                  struct i915_gem_context *ctx,
+                  struct intel_engine_cs *engine)
+{
+       ce->gem_context = ctx;
+
+       INIT_LIST_HEAD(&ce->signal_link);
+       INIT_LIST_HEAD(&ce->signals);
+
+       /* Use the whole device by default */
+       ce->sseu = intel_device_default_sseu(ctx->i915);
+
+       i915_active_request_init(&ce->active_tracker,
+                                NULL, intel_context_retire);
+}
+
 static struct i915_gem_context *
 __create_hw_context(struct drm_i915_private *dev_priv,
                    struct drm_i915_file_private *file_priv)
@@ -339,11 +365,8 @@ __create_hw_context(struct drm_i915_private *dev_priv,
        ctx->i915 = dev_priv;
        ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
 
-       for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
-               struct intel_context *ce = &ctx->__engine[n];
-
-               ce->gem_context = ctx;
-       }
+       for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++)
+               intel_context_init(&ctx->__engine[n], ctx, dev_priv->engine[n]);
 
        INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
        INIT_LIST_HEAD(&ctx->handles_list);
@@ -646,8 +669,8 @@ last_request_on_engine(struct i915_timeline *timeline,
 
        GEM_BUG_ON(timeline == &engine->timeline);
 
-       rq = i915_gem_active_raw(&timeline->last_request,
-                                &engine->i915->drm.struct_mutex);
+       rq = i915_active_request_raw(&timeline->last_request,
+                                    &engine->i915->drm.struct_mutex);
        if (rq && rq->engine == engine) {
                GEM_TRACE("last request for %s on engine %s: %llx:%llu\n",
                          timeline->name, engine->name,
@@ -840,6 +863,56 @@ out:
        return 0;
 }
 
+static int get_sseu(struct i915_gem_context *ctx,
+                   struct drm_i915_gem_context_param *args)
+{
+       struct drm_i915_gem_context_param_sseu user_sseu;
+       struct intel_engine_cs *engine;
+       struct intel_context *ce;
+       int ret;
+
+       if (args->size == 0)
+               goto out;
+       else if (args->size < sizeof(user_sseu))
+               return -EINVAL;
+
+       if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
+                          sizeof(user_sseu)))
+               return -EFAULT;
+
+       if (user_sseu.flags || user_sseu.rsvd)
+               return -EINVAL;
+
+       engine = intel_engine_lookup_user(ctx->i915,
+                                         user_sseu.engine_class,
+                                         user_sseu.engine_instance);
+       if (!engine)
+               return -EINVAL;
+
+       /* The mutex here only serializes get_param and set_param. */
+       ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
+       if (ret)
+               return ret;
+
+       ce = to_intel_context(ctx, engine);
+
+       user_sseu.slice_mask = ce->sseu.slice_mask;
+       user_sseu.subslice_mask = ce->sseu.subslice_mask;
+       user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
+       user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
+
+       mutex_unlock(&ctx->i915->drm.struct_mutex);
+
+       if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
+                        sizeof(user_sseu)))
+               return -EFAULT;
+
+out:
+       args->size = sizeof(user_sseu);
+
+       return 0;
+}
+
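
As a usage sketch for the get_sseu() path just added: a userspace caller could read back the render engine's current powergating configuration roughly as below. This assumes the drm_i915_gem_context_param_sseu layout used by this patch (separate engine_class/engine_instance fields) and an already-created context; treat the helper and the field widths as illustrative rather than final uAPI.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    /* Read back the context's render-engine SSEU configuration. */
    static int read_sseu(int drm_fd, uint32_t ctx_id)
    {
            struct drm_i915_gem_context_param_sseu sseu;
            struct drm_i915_gem_context_param arg;

            memset(&sseu, 0, sizeof(sseu));
            sseu.engine_class = I915_ENGINE_CLASS_RENDER;   /* RCS only */
            sseu.engine_instance = 0;

            memset(&arg, 0, sizeof(arg));
            arg.ctx_id = ctx_id;
            arg.param = I915_CONTEXT_PARAM_SSEU;
            arg.size = sizeof(sseu);    /* size == 0 would just probe the size */
            arg.value = (uintptr_t)&sseu;

            if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg))
                    return -1;

            printf("slices=%#llx subslices=%#llx eus=%u..%u\n",
                   (unsigned long long)sseu.slice_mask,
                   (unsigned long long)sseu.subslice_mask,
                   (unsigned int)sseu.min_eus_per_subslice,
                   (unsigned int)sseu.max_eus_per_subslice);
            return 0;
    }
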
 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file)
 {
@@ -852,15 +925,17 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
        if (!ctx)
                return -ENOENT;
 
-       args->size = 0;
        switch (args->param) {
        case I915_CONTEXT_PARAM_BAN_PERIOD:
                ret = -EINVAL;
                break;
        case I915_CONTEXT_PARAM_NO_ZEROMAP:
+               args->size = 0;
                args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
                break;
        case I915_CONTEXT_PARAM_GTT_SIZE:
+               args->size = 0;
+
                if (ctx->ppgtt)
                        args->value = ctx->ppgtt->vm.total;
                else if (to_i915(dev)->mm.aliasing_ppgtt)
@@ -869,14 +944,20 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
                        args->value = to_i915(dev)->ggtt.vm.total;
                break;
        case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
+               args->size = 0;
                args->value = i915_gem_context_no_error_capture(ctx);
                break;
        case I915_CONTEXT_PARAM_BANNABLE:
+               args->size = 0;
                args->value = i915_gem_context_is_bannable(ctx);
                break;
        case I915_CONTEXT_PARAM_PRIORITY:
+               args->size = 0;
                args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
                break;
+       case I915_CONTEXT_PARAM_SSEU:
+               ret = get_sseu(ctx, args);
+               break;
        default:
                ret = -EINVAL;
                break;
@@ -886,6 +967,281 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
        return ret;
 }
 
+static int gen8_emit_rpcs_config(struct i915_request *rq,
+                                struct intel_context *ce,
+                                struct intel_sseu sseu)
+{
+       u64 offset;
+       u32 *cs;
+
+       cs = intel_ring_begin(rq, 4);
+       if (IS_ERR(cs))
+               return PTR_ERR(cs);
+
+       offset = i915_ggtt_offset(ce->state) +
+                LRC_STATE_PN * PAGE_SIZE +
+                (CTX_R_PWR_CLK_STATE + 1) * 4;
+
+       *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+       *cs++ = lower_32_bits(offset);
+       *cs++ = upper_32_bits(offset);
+       *cs++ = gen8_make_rpcs(rq->i915, &sseu);
+
+       intel_ring_advance(rq, cs);
+
+       return 0;
+}
+
+static int
+gen8_modify_rpcs_gpu(struct intel_context *ce,
+                    struct intel_engine_cs *engine,
+                    struct intel_sseu sseu)
+{
+       struct drm_i915_private *i915 = engine->i915;
+       struct i915_request *rq, *prev;
+       intel_wakeref_t wakeref;
+       int ret;
+
+       GEM_BUG_ON(!ce->pin_count);
+
+       lockdep_assert_held(&i915->drm.struct_mutex);
+
+       /* Submitting requests etc needs the hw awake. */
+       wakeref = intel_runtime_pm_get(i915);
+
+       rq = i915_request_alloc(engine, i915->kernel_context);
+       if (IS_ERR(rq)) {
+               ret = PTR_ERR(rq);
+               goto out_put;
+       }
+
+       /* Queue this switch after all other activity by this context. */
+       prev = i915_active_request_raw(&ce->ring->timeline->last_request,
+                                      &i915->drm.struct_mutex);
+       if (prev && !i915_request_completed(prev)) {
+               ret = i915_request_await_dma_fence(rq, &prev->fence);
+               if (ret < 0)
+                       goto out_add;
+       }
+
+       /* Order all following requests to be after. */
+       ret = i915_timeline_set_barrier(ce->ring->timeline, rq);
+       if (ret)
+               goto out_add;
+
+       ret = gen8_emit_rpcs_config(rq, ce, sseu);
+       if (ret)
+               goto out_add;
+
+       /*
+        * Setting the ce activity tracker guarantees that the context image
+        * and the timeline remain pinned until the modifying request is
+        * retired.
+        *
+        * But we only need to take one pin on account of it; in other words,
+        * we transfer the pinned ce object to the tracked active request.
+        */
+       if (!i915_active_request_isset(&ce->active_tracker))
+               __intel_context_pin(ce);
+       __i915_active_request_set(&ce->active_tracker, rq);
+
+out_add:
+       i915_request_add(rq);
+out_put:
+       intel_runtime_pm_put(i915, wakeref);
+
+       return ret;
+}
+
+static int
+__i915_gem_context_reconfigure_sseu(struct i915_gem_context *ctx,
+                                   struct intel_engine_cs *engine,
+                                   struct intel_sseu sseu)
+{
+       struct intel_context *ce = to_intel_context(ctx, engine);
+       int ret = 0;
+
+       GEM_BUG_ON(INTEL_GEN(ctx->i915) < 8);
+       GEM_BUG_ON(engine->id != RCS);
+
+       /* Nothing to do if unmodified. */
+       if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
+               return 0;
+
+       /*
+        * If the context is not idle, we have to submit an ordered request to
+        * modify its context image via the kernel context. Pristine and idle
+        * contexts will be configured on pinning.
+        */
+       if (ce->pin_count)
+               ret = gen8_modify_rpcs_gpu(ce, engine, sseu);
+
+       if (!ret)
+               ce->sseu = sseu;
+
+       return ret;
+}
+
+static int
+i915_gem_context_reconfigure_sseu(struct i915_gem_context *ctx,
+                                 struct intel_engine_cs *engine,
+                                 struct intel_sseu sseu)
+{
+       int ret;
+
+       ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
+       if (ret)
+               return ret;
+
+       ret = __i915_gem_context_reconfigure_sseu(ctx, engine, sseu);
+
+       mutex_unlock(&ctx->i915->drm.struct_mutex);
+
+       return ret;
+}
+
+static int
+user_to_context_sseu(struct drm_i915_private *i915,
+                    const struct drm_i915_gem_context_param_sseu *user,
+                    struct intel_sseu *context)
+{
+       const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu;
+
+       /* No zeros in any field. */
+       if (!user->slice_mask || !user->subslice_mask ||
+           !user->min_eus_per_subslice || !user->max_eus_per_subslice)
+               return -EINVAL;
+
+       /* Max > min. */
+       if (user->max_eus_per_subslice < user->min_eus_per_subslice)
+               return -EINVAL;
+
+       /*
+        * Some future-proofing on the types, since the uAPI is wider than the
+        * current internal implementation.
+        */
+       if (overflows_type(user->slice_mask, context->slice_mask) ||
+           overflows_type(user->subslice_mask, context->subslice_mask) ||
+           overflows_type(user->min_eus_per_subslice,
+                          context->min_eus_per_subslice) ||
+           overflows_type(user->max_eus_per_subslice,
+                          context->max_eus_per_subslice))
+               return -EINVAL;
+
+       /* Check validity against hardware. */
+       if (user->slice_mask & ~device->slice_mask)
+               return -EINVAL;
+
+       if (user->subslice_mask & ~device->subslice_mask[0])
+               return -EINVAL;
+
+       if (user->max_eus_per_subslice > device->max_eus_per_subslice)
+               return -EINVAL;
+
+       context->slice_mask = user->slice_mask;
+       context->subslice_mask = user->subslice_mask;
+       context->min_eus_per_subslice = user->min_eus_per_subslice;
+       context->max_eus_per_subslice = user->max_eus_per_subslice;
+
+       /* Part specific restrictions. */
+       if (IS_GEN(i915, 11)) {
+               unsigned int hw_s = hweight8(device->slice_mask);
+               unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
+               unsigned int req_s = hweight8(context->slice_mask);
+               unsigned int req_ss = hweight8(context->subslice_mask);
+
+               /*
+                * Only full subslice enablement is possible if more than one
+                * slice is turned on.
+                */
+               if (req_s > 1 && req_ss != hw_ss_per_s)
+                       return -EINVAL;
+
+               /*
+                * If more than four (SScount bitfield limit) subslices are
+                * requested then the number has to be even.
+                */
+               if (req_ss > 4 && (req_ss & 1))
+                       return -EINVAL;
+
+               /*
+                * If only one slice is enabled and the subslice count is below
+                * full device enablement, it must be at most half of all the
+                * available subslices.
+                */
+               if (req_s == 1 && req_ss < hw_ss_per_s &&
+                   req_ss > (hw_ss_per_s / 2))
+                       return -EINVAL;
+
+               /* ABI restriction - VME use case only. */
+
+               /* All slices or one slice only. */
+               if (req_s != 1 && req_s != hw_s)
+                       return -EINVAL;
+
+               /*
+                * Half subslices or full enablement only when one slice is
+                * enabled.
+                */
+               if (req_s == 1 &&
+                   (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
+                       return -EINVAL;
+
+               /* No EU configuration changes. */
+               if ((user->min_eus_per_subslice !=
+                    device->max_eus_per_subslice) ||
+                   (user->max_eus_per_subslice !=
+                    device->max_eus_per_subslice))
+                       return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int set_sseu(struct i915_gem_context *ctx,
+                   struct drm_i915_gem_context_param *args)
+{
+       struct drm_i915_private *i915 = ctx->i915;
+       struct drm_i915_gem_context_param_sseu user_sseu;
+       struct intel_engine_cs *engine;
+       struct intel_sseu sseu;
+       int ret;
+
+       if (args->size < sizeof(user_sseu))
+               return -EINVAL;
+
+       if (!IS_GEN(i915, 11))
+               return -ENODEV;
+
+       if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
+                          sizeof(user_sseu)))
+               return -EFAULT;
+
+       if (user_sseu.flags || user_sseu.rsvd)
+               return -EINVAL;
+
+       engine = intel_engine_lookup_user(i915,
+                                         user_sseu.engine_class,
+                                         user_sseu.engine_instance);
+       if (!engine)
+               return -EINVAL;
+
+       /* Only render engine supports RPCS configuration. */
+       if (engine->class != RENDER_CLASS)
+               return -ENODEV;
+
+       ret = user_to_context_sseu(i915, &user_sseu, &sseu);
+       if (ret)
+               return ret;
+
+       ret = i915_gem_context_reconfigure_sseu(ctx, engine, sseu);
+       if (ret)
+               return ret;
+
+       args->size = sizeof(user_sseu);
+
+       return 0;
+}
+
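
And the matching set side, exercising the Gen11 restrictions validated in user_to_context_sseu(): on an assumed one-slice part with eight subslices and eight EUs per subslice (an illustrative topology, not a statement about any particular SKU), the VME rules allow either full subslice enablement or exactly half, with the EU range pinned at the hardware maximum. A sketch reusing the includes from the earlier example:

    /* Request half of the (assumed) eight subslices for VME-style use. */
    static int halve_subslices(int drm_fd, uint32_t ctx_id)
    {
            struct drm_i915_gem_context_param_sseu sseu;
            struct drm_i915_gem_context_param arg;

            memset(&sseu, 0, sizeof(sseu));
            sseu.engine_class = I915_ENGINE_CLASS_RENDER;
            sseu.engine_instance = 0;
            sseu.slice_mask = 0x1;          /* the only slice */
            sseu.subslice_mask = 0x0f;      /* 4 of 8: exactly half, as required */
            sseu.min_eus_per_subslice = 8;  /* EU config must stay at the hw max */
            sseu.max_eus_per_subslice = 8;

            memset(&arg, 0, sizeof(arg));
            arg.ctx_id = ctx_id;
            arg.param = I915_CONTEXT_PARAM_SSEU;
            arg.size = sizeof(sseu);
            arg.value = (uintptr_t)&sseu;

            return ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
    }
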
 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file)
 {
@@ -948,7 +1304,9 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
                                        I915_USER_PRIORITY(priority);
                }
                break;
-
+       case I915_CONTEXT_PARAM_SSEU:
+               ret = set_sseu(ctx, args);
+               break;
        default:
                ret = -EINVAL;
                break;
index f6d870b1f73e397971e4f55714d8524f68220395..ca150a764c24d48547106108692e1ac5b2e9ed82 100644 (file)
@@ -31,6 +31,7 @@
 
 #include "i915_gem.h"
 #include "i915_scheduler.h"
+#include "intel_device_info.h"
 
 struct pid;
 
@@ -53,6 +54,16 @@ struct intel_context_ops {
        void (*destroy)(struct intel_context *ce);
 };
 
+/*
+ * Powergating configuration for a particular (context,engine).
+ */
+struct intel_sseu {
+       u8 slice_mask;
+       u8 subslice_mask;
+       u8 min_eus_per_subslice;
+       u8 max_eus_per_subslice;
+};
+
 /**
  * struct i915_gem_context - client state
  *
@@ -164,13 +175,24 @@ struct i915_gem_context {
        struct intel_context {
                struct i915_gem_context *gem_context;
                struct intel_engine_cs *active;
+               struct list_head signal_link;
+               struct list_head signals;
                struct i915_vma *state;
                struct intel_ring *ring;
                u32 *lrc_reg_state;
                u64 lrc_desc;
                int pin_count;
 
+               /**
+                * active_tracker: Active tracker for the external rq activity
+                * on this intel_context object.
+                */
+               struct i915_active_request active_tracker;
+
                const struct intel_context_ops *ops;
+
+               /** sseu: Control eu/slice partitioning */
+               struct intel_sseu sseu;
        } __engine[I915_NUM_ENGINES];
 
        /** ring_size: size for allocating the per-engine ring buffer */
@@ -364,4 +386,8 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx)
        kref_put(&ctx->ref, i915_gem_context_release);
 }
 
+void intel_context_init(struct intel_context *ce,
+                       struct i915_gem_context *ctx,
+                       struct intel_engine_cs *engine);
+
 #endif /* !__I915_GEM_CONTEXT_H__ */
index 82e2ca17a441eed4c9f562b9d9ee9aa45f2e740a..02f7298bfe57cda5ad1d344adb0ff99079267c00 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/dma-buf.h>
 #include <linux/reservation.h>
 
-#include <drm/drmP.h>
 
 #include "i915_drv.h"
 
index 02b83a5ed96c9ec7b539bec4bdc88ed3ac1946cd..68d74c50ac392dba58ea511386912790399c2a13 100644 (file)
@@ -26,7 +26,6 @@
  *
  */
 
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 
 #include "i915_drv.h"
@@ -127,31 +126,25 @@ i915_gem_evict_something(struct i915_address_space *vm,
        struct drm_i915_private *dev_priv = vm->i915;
        struct drm_mm_scan scan;
        struct list_head eviction_list;
-       struct list_head *phases[] = {
-               &vm->inactive_list,
-               &vm->active_list,
-               NULL,
-       }, **phase;
        struct i915_vma *vma, *next;
        struct drm_mm_node *node;
        enum drm_mm_insert_mode mode;
+       struct i915_vma *active;
        int ret;
 
        lockdep_assert_held(&vm->i915->drm.struct_mutex);
        trace_i915_gem_evict(vm, min_size, alignment, flags);
 
        /*
-        * The goal is to evict objects and amalgamate space in LRU order.
-        * The oldest idle objects reside on the inactive list, which is in
-        * retirement order. The next objects to retire are those in flight,
-        * on the active list, again in retirement order.
+        * The goal is to evict objects and amalgamate space in rough LRU order.
+        * Since both active and inactive objects reside on the same list,
+        * in a mix of creation and last scanned order, as we process the list
+        * we sort it into inactive/active, which keeps the active portion
+        * in a rough MRU order.
         *
         * The retirement sequence is thus:
-        *   1. Inactive objects (already retired)
-        *   2. Active objects (will stall on unbinding)
-        *
-        * On each list, the oldest objects lie at the HEAD with the freshest
-        * object on the TAIL.
+        *   1. Inactive objects (already retired, random order)
+        *   2. Active objects (will stall on unbinding, oldest scanned first)
         */
        mode = DRM_MM_INSERT_BEST;
        if (flags & PIN_HIGH)
@@ -170,17 +163,46 @@ i915_gem_evict_something(struct i915_address_space *vm,
         */
        if (!(flags & PIN_NONBLOCK))
                i915_retire_requests(dev_priv);
-       else
-               phases[1] = NULL;
 
 search_again:
+       active = NULL;
        INIT_LIST_HEAD(&eviction_list);
-       phase = phases;
-       do {
-               list_for_each_entry(vma, *phase, vm_link)
-                       if (mark_free(&scan, vma, flags, &eviction_list))
-                               goto found;
-       } while (*++phase);
+       list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) {
+               /*
+                * We keep this list in a rough least-recently scanned order
+                * of active elements (inactive elements are cheap to reap).
+                * New entries are added to the end, and we move anything we
+                * scan to the end. The assumption is that the working set
+                * of applications is either steady state (and thanks to the
+                * userspace bo cache it almost always is) or volatile and
+                * frequently replaced after a frame, in which case it is
+                * self-evicting! Given that assumption, the MRU order of the
+                * scan list is fairly static, and keeping it in least-recently
+                * scanned order is suitable.
+                *
+                * To notice when we complete one full cycle, we record the
+                * first active element seen, before moving it to the tail.
+                */
+               if (i915_vma_is_active(vma)) {
+                       if (vma == active) {
+                               if (flags & PIN_NONBLOCK)
+                                       break;
+
+                               active = ERR_PTR(-EAGAIN);
+                       }
+
+                       if (active != ERR_PTR(-EAGAIN)) {
+                               if (!active)
+                                       active = vma;
+
+                               list_move_tail(&vma->vm_link, &vm->bound_list);
+                               continue;
+                       }
+               }
+
+               if (mark_free(&scan, vma, flags, &eviction_list))
+                       goto found;
+       }
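
The rotate-and-sentinel scan above generalises to any mixed active/inactive list: remember the first active element encountered, rotate actives to the tail, and treat meeting that element again as having completed one full lap. A self-contained sketch of just that control flow over a circular doubly linked list; none of these names are driver API, and a non-blocking caller (the PIN_NONBLOCK case) would simply reject the active fallback.

    #include <stdbool.h>
    #include <stddef.h>

    struct item { struct item *prev, *next; bool active; };

    /* Unlink @it and reinsert it just before the @head sentinel (the tail). */
    static void move_tail(struct item *head, struct item *it)
    {
            it->prev->next = it->next;
            it->next->prev = it->prev;

            it->prev = head->prev;
            it->next = head;
            head->prev->next = it;
            head->prev = it;
    }

    /* Return the first inactive item, rotating active ones to the tail.
     * 'sentinel' records the first active item seen; reaching it again
     * means one full lap, so fall back to an active victim (which will
     * stall whoever unbinds it). */
    static struct item *scan(struct item *head)
    {
            struct item *sentinel = NULL;
            struct item *it, *prev;

            for (it = head->next; it != head; it = it->next) {
                    if (it->active) {
                            if (it == sentinel)
                                    return it;      /* full lap: active victim */

                            if (!sentinel)
                                    sentinel = it;

                            prev = it->prev;
                            move_tail(head, it);
                            it = prev;      /* resume from the gap we left */
                            continue;
                    }
                    return it;      /* inactive items are cheap to reap */
            }
            return NULL;            /* nothing to scan */
    }
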
 
        /* Nothing found, clean up and bail out! */
        list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
@@ -389,11 +411,6 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
  */
 int i915_gem_evict_vm(struct i915_address_space *vm)
 {
-       struct list_head *phases[] = {
-               &vm->inactive_list,
-               &vm->active_list,
-               NULL
-       }, **phase;
        struct list_head eviction_list;
        struct i915_vma *vma, *next;
        int ret;
@@ -413,16 +430,15 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
        }
 
        INIT_LIST_HEAD(&eviction_list);
-       phase = phases;
-       do {
-               list_for_each_entry(vma, *phase, vm_link) {
-                       if (i915_vma_is_pinned(vma))
-                               continue;
+       mutex_lock(&vm->mutex);
+       list_for_each_entry(vma, &vm->bound_list, vm_link) {
+               if (i915_vma_is_pinned(vma))
+                       continue;
 
-                       __i915_vma_pin(vma);
-                       list_add(&vma->evict_link, &eviction_list);
-               }
-       } while (*++phase);
+               __i915_vma_pin(vma);
+               list_add(&vma->evict_link, &eviction_list);
+       }
+       mutex_unlock(&vm->mutex);
 
        ret = 0;
        list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
index 485b259127c36fdb4aeb619559e790f394ff412a..02adcaf6ebea69086aa07be57fed55b347636c94 100644 (file)
@@ -31,7 +31,6 @@
 #include <linux/sync_file.h>
 #include <linux/uaccess.h>
 
-#include <drm/drmP.h>
 #include <drm/drm_syncobj.h>
 #include <drm/i915_drm.h>
 
@@ -754,6 +753,68 @@ static int eb_select_context(struct i915_execbuffer *eb)
        return 0;
 }
 
+static struct i915_request *__eb_wait_for_ring(struct intel_ring *ring)
+{
+       struct i915_request *rq;
+
+       /*
+        * Completely unscientific finger-in-the-air estimates for suitable
+        * maximum user request size (to avoid blocking) and then backoff.
+        */
+       if (intel_ring_update_space(ring) >= PAGE_SIZE)
+               return NULL;
+
+       /*
+        * Find a request that after waiting upon, there will be at least half
+        * the ring available. The hysteresis allows us to compete for the
+        * shared ring and should mean that we sleep less often prior to
+        * claiming our resources, but not so long that the ring completely
+        * drains before we can submit our next request.
+        */
+       list_for_each_entry(rq, &ring->request_list, ring_link) {
+               if (__intel_ring_space(rq->postfix,
+                                      ring->emit, ring->size) > ring->size / 2)
+                       break;
+       }
+       if (&rq->ring_link == &ring->request_list)
+               return NULL; /* weird, we will check again later for real */
+
+       return i915_request_get(rq);
+}
+
+static int eb_wait_for_ring(const struct i915_execbuffer *eb)
+{
+       const struct intel_context *ce;
+       struct i915_request *rq;
+       int ret = 0;
+
+       /*
+        * Apply a light amount of backpressure to prevent excessive hogs
+        * from blocking waiting for space whilst holding struct_mutex and
+        * keeping all of their resources pinned.
+        */
+
+       ce = to_intel_context(eb->ctx, eb->engine);
+       if (!ce->ring) /* first use, assume empty! */
+               return 0;
+
+       rq = __eb_wait_for_ring(ce->ring);
+       if (rq) {
+               mutex_unlock(&eb->i915->drm.struct_mutex);
+
+               if (i915_request_wait(rq,
+                                     I915_WAIT_INTERRUPTIBLE,
+                                     MAX_SCHEDULE_TIMEOUT) < 0)
+                       ret = -EINTR;
+
+               i915_request_put(rq);
+
+               mutex_lock(&eb->i915->drm.struct_mutex);
+       }
+
+       return ret;
+}
+
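
The half-ring hysteresis above reduces to modular arithmetic on a power-of-two ring. A toy model of the policy follows; these helpers are illustrative rather than the driver's own, the exact slack __intel_ring_space() reserves is glossed over, and 4096 stands in for PAGE_SIZE.

    #include <stdbool.h>

    /* Free bytes in a power-of-two ring, reserving one byte of slack so
     * that head == tail unambiguously means empty rather than full. */
    static unsigned int ring_space(unsigned int head, unsigned int tail,
                                   unsigned int size)
    {
            return (head - tail - 1) & (size - 1);
    }

    /* Block only when less than a page is free... */
    static bool should_block(unsigned int head, unsigned int tail,
                             unsigned int size)
    {
            return ring_space(head, tail, size) < 4096;
    }

    /* ...and then pick a request whose completion frees more than half
     * the ring, so writers neither thrash at the boundary nor wait for
     * a complete drain. */
    static bool enough_after_wait(unsigned int head, unsigned int tail,
                                  unsigned int size)
    {
            return ring_space(head, tail, size) > size / 2;
    }
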
 static int eb_lookup_vmas(struct i915_execbuffer *eb)
 {
        struct radix_tree_root *handles_vma = &eb->ctx->handles_vma;
@@ -1380,7 +1441,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
                 * batchbuffers.
                 */
                if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
-                   IS_GEN6(eb->i915)) {
+                   IS_GEN(eb->i915, 6)) {
                        err = i915_vma_bind(target, target->obj->cache_level,
                                            PIN_GLOBAL);
                        if (WARN_ONCE(err,
@@ -1896,7 +1957,7 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
        u32 *cs;
        int i;
 
-       if (!IS_GEN7(rq->i915) || rq->engine->id != RCS) {
+       if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS) {
                DRM_DEBUG("sol reset is gen7/rcs only\n");
                return -EINVAL;
        }
@@ -1977,6 +2038,18 @@ static int eb_submit(struct i915_execbuffer *eb)
                        return err;
        }
 
+       /*
+        * After we completed waiting for other engines (using HW semaphores)
+        * then we can signal that this request/batch is ready to run. This
+        * allows us to determine if the batch is still waiting on the GPU
+        * or actually running by checking the breadcrumb.
+        */
+       if (eb->engine->emit_init_breadcrumb) {
+               err = eb->engine->emit_init_breadcrumb(eb->request);
+               if (err)
+                       return err;
+       }
+
        err = eb->engine->emit_bb_start(eb->request,
                                        eb->batch->node.start +
                                        eb->batch_start_offset,
@@ -2203,6 +2276,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
        struct i915_execbuffer eb;
        struct dma_fence *in_fence = NULL;
        struct sync_file *out_fence = NULL;
+       intel_wakeref_t wakeref;
        int out_fence_fd = -1;
        int err;
 
@@ -2273,12 +2347,16 @@ i915_gem_do_execbuffer(struct drm_device *dev,
         * wakeref that we hold until the GPU has been idle for at least
         * 100ms.
         */
-       intel_runtime_pm_get(eb.i915);
+       wakeref = intel_runtime_pm_get(eb.i915);
 
        err = i915_mutex_lock_interruptible(dev);
        if (err)
                goto err_rpm;
 
+       err = eb_wait_for_ring(&eb); /* may temporarily drop struct_mutex */
+       if (unlikely(err))
+               goto err_unlock;
+
        err = eb_relocate(&eb);
        if (err) {
                /*
@@ -2423,9 +2501,10 @@ err_batch_unpin:
 err_vma:
        if (eb.exec)
                eb_release_vmas(&eb);
+err_unlock:
        mutex_unlock(&dev->struct_mutex);
 err_rpm:
-       intel_runtime_pm_put(eb.i915);
+       intel_runtime_pm_put(eb.i915, wakeref);
        i915_gem_context_put(eb.ctx);
 err_destroy:
        eb_destroy(&eb);
index d548ac05ccd7a45994f38960dd121e79b4d6ecf9..e037e94792f3530e202e66c8d0b5c915bf49e454 100644 (file)
@@ -21,7 +21,6 @@
  * IN THE SOFTWARE.
  */
 
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
@@ -193,9 +192,9 @@ static void fence_write(struct drm_i915_fence_reg *fence,
         * and explicitly managed for internal users.
         */
 
-       if (IS_GEN2(fence->i915))
+       if (IS_GEN(fence->i915, 2))
                i830_write_fence_reg(fence, vma);
-       else if (IS_GEN3(fence->i915))
+       else if (IS_GEN(fence->i915, 3))
                i915_write_fence_reg(fence, vma);
        else
                i965_write_fence_reg(fence, vma);
@@ -210,6 +209,7 @@ static void fence_write(struct drm_i915_fence_reg *fence,
 static int fence_update(struct drm_i915_fence_reg *fence,
                        struct i915_vma *vma)
 {
+       intel_wakeref_t wakeref;
        int ret;
 
        if (vma) {
@@ -223,7 +223,7 @@ static int fence_update(struct drm_i915_fence_reg *fence,
                         i915_gem_object_get_tiling(vma->obj)))
                        return -EINVAL;
 
-               ret = i915_gem_active_retire(&vma->last_fence,
+               ret = i915_active_request_retire(&vma->last_fence,
                                             &vma->obj->base.dev->struct_mutex);
                if (ret)
                        return ret;
@@ -232,7 +232,7 @@ static int fence_update(struct drm_i915_fence_reg *fence,
        if (fence->vma) {
                struct i915_vma *old = fence->vma;
 
-               ret = i915_gem_active_retire(&old->last_fence,
+               ret = i915_active_request_retire(&old->last_fence,
                                             &old->obj->base.dev->struct_mutex);
                if (ret)
                        return ret;
@@ -257,9 +257,10 @@ static int fence_update(struct drm_i915_fence_reg *fence,
         * If the device is currently powered down, we will defer the write
         * to the runtime resume, see i915_gem_restore_fences().
         */
-       if (intel_runtime_pm_get_if_in_use(fence->i915)) {
+       wakeref = intel_runtime_pm_get_if_in_use(fence->i915);
+       if (wakeref) {
                fence_write(fence, vma);
-               intel_runtime_pm_put(fence->i915);
+               intel_runtime_pm_put(fence->i915, wakeref);
        }
 
        if (vma) {
@@ -554,8 +555,8 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
 void
 i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
 {
-       uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
-       uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+       u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+       u32 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
 
        if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv)) {
                /*
@@ -578,7 +579,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
                                swizzle_y = I915_BIT_6_SWIZZLE_NONE;
                        }
                } else {
-                       uint32_t dimm_c0, dimm_c1;
+                       u32 dimm_c0, dimm_c1;
                        dimm_c0 = I915_READ(MAD_DIMM_C0);
                        dimm_c1 = I915_READ(MAD_DIMM_C1);
                        dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
@@ -596,13 +597,13 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
                                swizzle_y = I915_BIT_6_SWIZZLE_NONE;
                        }
                }
-       } else if (IS_GEN5(dev_priv)) {
+       } else if (IS_GEN(dev_priv, 5)) {
                /* On Ironlake whatever DRAM config, GPU always do
                 * same swizzling setup.
                 */
                swizzle_x = I915_BIT_6_SWIZZLE_9_10;
                swizzle_y = I915_BIT_6_SWIZZLE_9;
-       } else if (IS_GEN2(dev_priv)) {
+       } else if (IS_GEN(dev_priv, 2)) {
                /* As far as we know, the 865 doesn't have these bit 6
                 * swizzling issues.
                 */
@@ -610,7 +611,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
                swizzle_y = I915_BIT_6_SWIZZLE_NONE;
        } else if (IS_MOBILE(dev_priv) ||
                   IS_I915G(dev_priv) || IS_I945G(dev_priv)) {
-               uint32_t dcc;
+               u32 dcc;
 
                /* On 9xx chipsets, channel interleave by the CPU is
                 * determined by DCC.  For single-channel, neither the CPU
@@ -647,7 +648,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
                }
 
                /* check for L-shaped memory aka modified enhanced addressing */
-               if (IS_GEN4(dev_priv) &&
+               if (IS_GEN(dev_priv, 4) &&
                    !(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
                        swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
                        swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
index 99a31ded4dfdfcc8eac68c390be31a76df27feee..09dcaf14121b622aeed9a259367c017bf2ff215d 100644 (file)
@@ -50,4 +50,3 @@ struct drm_i915_fence_reg {
 };
 
 #endif
-
index add1fe7aeb930f2e21e73d12dbf96f095decc5be..d646d37eec2f8a751a3a7f1642bae26841d2b24f 100644 (file)
 
 #include <asm/set_memory.h>
 
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 
 #include "i915_drv.h"
 #include "i915_vgpu.h"
+#include "i915_reset.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
 #include "intel_frontbuffer.h"
@@ -474,8 +474,7 @@ static void vm_free_page(struct i915_address_space *vm, struct page *page)
        spin_unlock(&vm->free_pages.lock);
 }
 
-static void i915_address_space_init(struct i915_address_space *vm,
-                                   struct drm_i915_private *dev_priv)
+static void i915_address_space_init(struct i915_address_space *vm, int subclass)
 {
        /*
         * The vm->mutex must be reclaim safe (for use in the shrinker).
@@ -483,7 +482,8 @@ static void i915_address_space_init(struct i915_address_space *vm,
         * attempt holding the lock is immediately reported by lockdep.
         */
        mutex_init(&vm->mutex);
-       i915_gem_shrinker_taints_mutex(&vm->mutex);
+       lockdep_set_subclass(&vm->mutex, subclass);
+       i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
 
        GEM_BUG_ON(!vm->total);
        drm_mm_init(&vm->mm, 0, vm->total);
@@ -491,9 +491,8 @@ static void i915_address_space_init(struct i915_address_space *vm,
 
        stash_init(&vm->free_pages);
 
-       INIT_LIST_HEAD(&vm->active_list);
-       INIT_LIST_HEAD(&vm->inactive_list);
        INIT_LIST_HEAD(&vm->unbound_list);
+       INIT_LIST_HEAD(&vm->bound_list);
 }
 
 static void i915_address_space_fini(struct i915_address_space *vm)
@@ -1423,8 +1422,6 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
                        gen8_initialize_pd(vm, pd);
                        gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
                        GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));
-
-                       mark_tlbs_dirty(i915_vm_to_ppgtt(vm));
                }
 
                ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
@@ -1490,84 +1487,6 @@ unwind:
        return -ENOMEM;
 }
 
-static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
-                         struct i915_page_directory_pointer *pdp,
-                         u64 start, u64 length,
-                         gen8_pte_t scratch_pte,
-                         struct seq_file *m)
-{
-       struct i915_address_space *vm = &ppgtt->vm;
-       struct i915_page_directory *pd;
-       u32 pdpe;
-
-       gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
-               struct i915_page_table *pt;
-               u64 pd_len = length;
-               u64 pd_start = start;
-               u32 pde;
-
-               if (pdp->page_directory[pdpe] == ppgtt->vm.scratch_pd)
-                       continue;
-
-               seq_printf(m, "\tPDPE #%d\n", pdpe);
-               gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
-                       u32 pte;
-                       gen8_pte_t *pt_vaddr;
-
-                       if (pd->page_table[pde] == ppgtt->vm.scratch_pt)
-                               continue;
-
-                       pt_vaddr = kmap_atomic_px(pt);
-                       for (pte = 0; pte < GEN8_PTES; pte += 4) {
-                               u64 va = (pdpe << GEN8_PDPE_SHIFT |
-                                         pde << GEN8_PDE_SHIFT |
-                                         pte << GEN8_PTE_SHIFT);
-                               int i;
-                               bool found = false;
-
-                               for (i = 0; i < 4; i++)
-                                       if (pt_vaddr[pte + i] != scratch_pte)
-                                               found = true;
-                               if (!found)
-                                       continue;
-
-                               seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
-                               for (i = 0; i < 4; i++) {
-                                       if (pt_vaddr[pte + i] != scratch_pte)
-                                               seq_printf(m, " %llx", pt_vaddr[pte + i]);
-                                       else
-                                               seq_puts(m, "  SCRATCH ");
-                               }
-                               seq_puts(m, "\n");
-                       }
-                       kunmap_atomic(pt_vaddr);
-               }
-       }
-}
-
-static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
-{
-       struct i915_address_space *vm = &ppgtt->vm;
-       const gen8_pte_t scratch_pte = vm->scratch_pte;
-       u64 start = 0, length = ppgtt->vm.total;
-
-       if (use_4lvl(vm)) {
-               u64 pml4e;
-               struct i915_pml4 *pml4 = &ppgtt->pml4;
-               struct i915_page_directory_pointer *pdp;
-
-               gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
-                       if (pml4->pdps[pml4e] == ppgtt->vm.scratch_pdp)
-                               continue;
-
-                       seq_printf(m, "    PML4E #%llu\n", pml4e);
-                       gen8_dump_pdp(ppgtt, pdp, start, length, scratch_pte, m);
-               }
-       } else {
-               gen8_dump_pdp(ppgtt, &ppgtt->pdp, start, length, scratch_pte, m);
-       }
-}
-
 static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
 {
        struct i915_address_space *vm = &ppgtt->vm;
@@ -1628,7 +1547,7 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
        /* From bdw, there is support for read-only pages in the PPGTT. */
        ppgtt->vm.has_read_only = true;
 
-       i915_address_space_init(&ppgtt->vm, i915);
+       i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
 
        /* There are only a few exceptions for gen >= 6: chv and bxt.
         * And we are not sure about the latter, so play safe for now.
@@ -1672,7 +1591,6 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
                gen8_ppgtt_notify_vgt(ppgtt, true);
 
        ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
-       ppgtt->debug_dump = gen8_dump_ppgtt;
 
        ppgtt->vm.vma_ops.bind_vma    = ppgtt_bind_vma;
        ppgtt->vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
@@ -1688,60 +1606,6 @@ err_free:
        return ERR_PTR(err);
 }
 
-static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
-{
-       struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
-       const gen6_pte_t scratch_pte = base->vm.scratch_pte;
-       struct i915_page_table *pt;
-       u32 pte, pde;
-
-       gen6_for_all_pdes(pt, &base->pd, pde) {
-               gen6_pte_t *vaddr;
-
-               if (pt == base->vm.scratch_pt)
-                       continue;
-
-               if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) {
-                       u32 expected =
-                               GEN6_PDE_ADDR_ENCODE(px_dma(pt)) |
-                               GEN6_PDE_VALID;
-                       u32 pd_entry = readl(ppgtt->pd_addr + pde);
-
-                       if (pd_entry != expected)
-                               seq_printf(m,
-                                          "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
-                                          pde,
-                                          pd_entry,
-                                          expected);
-
-                       seq_printf(m, "\tPDE: %x\n", pd_entry);
-               }
-
-               vaddr = kmap_atomic_px(base->pd.page_table[pde]);
-               for (pte = 0; pte < GEN6_PTES; pte += 4) {
-                       int i;
-
-                       for (i = 0; i < 4; i++)
-                               if (vaddr[pte + i] != scratch_pte)
-                                       break;
-                       if (i == 4)
-                               continue;
-
-                       seq_printf(m, "\t\t(%03d, %04d) %08llx: ",
-                                  pde, pte,
-                                  (pde * GEN6_PTES + pte) * I915_GTT_PAGE_SIZE);
-                       for (i = 0; i < 4; i++) {
-                               if (vaddr[pte + i] != scratch_pte)
-                                       seq_printf(m, " %08x", vaddr[pte + i]);
-                               else
-                                       seq_puts(m, "  SCRATCH");
-                       }
-                       seq_puts(m, "\n");
-               }
-               kunmap_atomic(vaddr);
-       }
-}
-
 /* Write pde (index) from the page directory @pd to the page table @pt */
 static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt,
                                  const unsigned int pde,
@@ -2053,21 +1917,23 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
        if (!vma)
                return ERR_PTR(-ENOMEM);
 
-       init_request_active(&vma->last_fence, NULL);
+       i915_active_init(i915, &vma->active, NULL);
+       INIT_ACTIVE_REQUEST(&vma->last_fence);
 
        vma->vm = &ggtt->vm;
        vma->ops = &pd_vma_ops;
        vma->private = ppgtt;
 
-       vma->active = RB_ROOT;
-
        vma->size = size;
        vma->fence_size = size;
        vma->flags = I915_VMA_GGTT;
        vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
 
        INIT_LIST_HEAD(&vma->obj_link);
+
+       mutex_lock(&vma->vm->mutex);
        list_add(&vma->vm_link, &vma->vm->unbound_list);
+       mutex_unlock(&vma->vm->mutex);
 
        return vma;
 }
@@ -2075,6 +1941,7 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
 int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
 {
        struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
+       int err;
 
        /*
         * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
@@ -2090,9 +1957,17 @@ int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
         * allocator works in address space sizes, so it's multiplied by page
         * size. We allocate at the top of the GTT to avoid fragmentation.
         */
-       return i915_vma_pin(ppgtt->vma,
-                           0, GEN6_PD_ALIGN,
-                           PIN_GLOBAL | PIN_HIGH);
+       err = i915_vma_pin(ppgtt->vma,
+                          0, GEN6_PD_ALIGN,
+                          PIN_GLOBAL | PIN_HIGH);
+       if (err)
+               goto unpin;
+
+       return 0;
+
+unpin:
+       ppgtt->pin_count = 0;
+       return err;
 }
 
 void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
@@ -2123,13 +1998,12 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
 
        ppgtt->base.vm.total = I915_PDES * GEN6_PTES * I915_GTT_PAGE_SIZE;
 
-       i915_address_space_init(&ppgtt->base.vm, i915);
+       i915_address_space_init(&ppgtt->base.vm, VM_CLASS_PPGTT);
 
        ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
        ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
        ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
        ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
-       ppgtt->base.debug_dump = gen6_dump_ppgtt;
 
        ppgtt->base.vm.vma_ops.bind_vma    = ppgtt_bind_vma;
        ppgtt->base.vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
@@ -2195,9 +2069,9 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
 {
        gtt_write_workarounds(dev_priv);
 
-       if (IS_GEN6(dev_priv))
+       if (IS_GEN(dev_priv, 6))
                gen6_ppgtt_enable(dev_priv);
-       else if (IS_GEN7(dev_priv))
+       else if (IS_GEN(dev_priv, 7))
                gen7_ppgtt_enable(dev_priv);
 
        return 0;
@@ -2238,8 +2112,7 @@ void i915_ppgtt_close(struct i915_address_space *vm)
 static void ppgtt_destroy_vma(struct i915_address_space *vm)
 {
        struct list_head *phases[] = {
-               &vm->active_list,
-               &vm->inactive_list,
+               &vm->bound_list,
                &vm->unbound_list,
                NULL,
        }, **phase;
@@ -2262,8 +2135,7 @@ void i915_ppgtt_release(struct kref *kref)
 
        ppgtt_destroy_vma(&ppgtt->vm);
 
-       GEM_BUG_ON(!list_empty(&ppgtt->vm.active_list));
-       GEM_BUG_ON(!list_empty(&ppgtt->vm.inactive_list));
+       GEM_BUG_ON(!list_empty(&ppgtt->vm.bound_list));
        GEM_BUG_ON(!list_empty(&ppgtt->vm.unbound_list));
 
        ppgtt->vm.cleanup(&ppgtt->vm);
@@ -2279,7 +2151,7 @@ static bool needs_idle_maps(struct drm_i915_private *dev_priv)
        /* Query intel_iommu to see if we need the workaround. Presumably that
         * was loaded first.
         */
-       return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active();
+       return IS_GEN(dev_priv, 5) && IS_MOBILE(dev_priv) && intel_vtd_active();
 }
 
 static void gen6_check_faults(struct drm_i915_private *dev_priv)
@@ -2372,7 +2244,8 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
                                     DMA_ATTR_NO_WARN))
                        return 0;
 
-               /* If the DMA remap fails, one cause can be that we have
+               /*
+                * If the DMA remap fails, one cause can be that we have
                 * too many objects pinned in a small remapping table,
                 * such as swiotlb. Incrementally purge all other objects and
                 * try again - if there are no more pages to remove from
@@ -2382,8 +2255,7 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
        } while (i915_gem_shrink(to_i915(obj->base.dev),
                                 obj->base.size >> PAGE_SHIFT, NULL,
                                 I915_SHRINK_BOUND |
-                                I915_SHRINK_UNBOUND |
-                                I915_SHRINK_ACTIVE));
+                                I915_SHRINK_UNBOUND));
 
        return -ENOSPC;
 }
@@ -2655,6 +2527,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 {
        struct drm_i915_private *i915 = vma->vm->i915;
        struct drm_i915_gem_object *obj = vma->obj;
+       intel_wakeref_t wakeref;
        u32 pte_flags;
 
        /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
@@ -2662,9 +2535,8 @@ static int ggtt_bind_vma(struct i915_vma *vma,
        if (i915_gem_object_is_readonly(obj))
                pte_flags |= PTE_READ_ONLY;
 
-       intel_runtime_pm_get(i915);
-       vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
-       intel_runtime_pm_put(i915);
+       with_intel_runtime_pm(i915, wakeref)
+               vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
 
        vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
 
@@ -2681,10 +2553,10 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 static void ggtt_unbind_vma(struct i915_vma *vma)
 {
        struct drm_i915_private *i915 = vma->vm->i915;
+       intel_wakeref_t wakeref;
 
-       intel_runtime_pm_get(i915);
-       vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
-       intel_runtime_pm_put(i915);
+       with_intel_runtime_pm(i915, wakeref)
+               vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
 }
 
 static int aliasing_gtt_bind_vma(struct i915_vma *vma,
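
From here on, every open-coded intel_runtime_pm_get()/intel_runtime_pm_put() pair in this file becomes a with_intel_runtime_pm() block scoped to the returned wakeref cookie. The helper is introduced elsewhere in the series; judging by its call sites it is the usual single-iteration for-loop wrapper, roughly:

	/* Shape inferred from the call sites in this diff: take a wakeref
	 * before the body, run the body exactly once, release on exit. */
	#define with_intel_runtime_pm(i915, wf) \
		for ((wf) = intel_runtime_pm_get(i915); (wf); \
		     intel_runtime_pm_put((i915), (wf)), (wf) = 0)
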
@@ -2716,9 +2588,12 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
        }
 
        if (flags & I915_VMA_GLOBAL_BIND) {
-               intel_runtime_pm_get(i915);
-               vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
-               intel_runtime_pm_put(i915);
+               intel_wakeref_t wakeref;
+
+               with_intel_runtime_pm(i915, wakeref) {
+                       vma->vm->insert_entries(vma->vm, vma,
+                                               cache_level, pte_flags);
+               }
        }
 
        return 0;
@@ -2729,9 +2604,11 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
        struct drm_i915_private *i915 = vma->vm->i915;
 
        if (vma->flags & I915_VMA_GLOBAL_BIND) {
-               intel_runtime_pm_get(i915);
-               vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
-               intel_runtime_pm_put(i915);
+               struct i915_address_space *vm = vma->vm;
+               intel_wakeref_t wakeref;
+
+               with_intel_runtime_pm(i915, wakeref)
+                       vm->clear_range(vm, vma->node.start, vma->size);
        }
 
        if (vma->flags & I915_VMA_LOCAL_BIND) {
@@ -2923,8 +2800,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
        mutex_lock(&dev_priv->drm.struct_mutex);
        i915_gem_fini_aliasing_ppgtt(dev_priv);
 
-       GEM_BUG_ON(!list_empty(&ggtt->vm.active_list));
-       list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link)
+       list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
                WARN_ON(i915_vma_unbind(vma));
 
        if (drm_mm_node_allocated(&ggtt->error_capture))
@@ -3355,7 +3231,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
        ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
 
        /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
-       if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
+       if (intel_ggtt_update_needs_vtd_wa(dev_priv) ||
+           IS_CHERRYVIEW(dev_priv) /* fails with concurrent use/update */) {
                ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
                ggtt->vm.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
                if (ggtt->vm.clear_range != nop_clear_range)
@@ -3556,7 +3433,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
         * and beyond the end of the GTT if we do not provide a guard.
         */
        mutex_lock(&dev_priv->drm.struct_mutex);
-       i915_address_space_init(&ggtt->vm, dev_priv);
+       i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
 
        ggtt->vm.is_ggtt = true;
 
@@ -3629,32 +3506,39 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
 
        i915_check_and_clear_faults(dev_priv);
 
+       mutex_lock(&ggtt->vm.mutex);
+
        /* First fill our portion of the GTT with scratch pages */
        ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
-
        ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */
 
        /* clflush objects bound into the GGTT and rebind them. */
-       GEM_BUG_ON(!list_empty(&ggtt->vm.active_list));
-       list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link) {
+       list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
                struct drm_i915_gem_object *obj = vma->obj;
 
                if (!(vma->flags & I915_VMA_GLOBAL_BIND))
                        continue;
 
+               mutex_unlock(&ggtt->vm.mutex);
+
                if (!i915_vma_unbind(vma))
-                       continue;
+                       goto lock;
 
                WARN_ON(i915_vma_bind(vma,
                                      obj ? obj->cache_level : 0,
                                      PIN_UPDATE));
                if (obj)
                        WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
+
+lock:
+               mutex_lock(&ggtt->vm.mutex);
        }
 
        ggtt->vm.closed = false;
        i915_ggtt_invalidate(dev_priv);
 
+       mutex_unlock(&ggtt->vm.mutex);
+
        if (INTEL_GEN(dev_priv) >= 8) {
                struct intel_ppat *ppat = &dev_priv->ppat;
 
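The rebind loop in i915_gem_restore_gtt_mappings() above cannot sleep in i915_vma_unbind()/i915_vma_bind() while holding ggtt->vm.mutex, hence the unlock/goto/relock dance on each pass. Reduced to a generic sketch (demo names, not i915 API):

	static void demo_rework_all(struct list_head *bound, struct mutex *lock)
	{
		struct demo_vma *pos, *next;

		mutex_lock(lock);
		list_for_each_entry_safe(pos, next, bound, link) {
			mutex_unlock(lock);

			do_sleeping_work(pos);	/* e.g. unbind, then rebind */

			/* Once the lock is dropped the list may change under
			 * us; even the saved next pointer is only safe if
			 * entries cannot be freed meanwhile, and any other
			 * state must be re-validated after relocking. */
			mutex_lock(lock);
		}
		mutex_unlock(lock);
	}
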
index 4874da09a3c471d24697b55b4ab7687d2d76afbf..03ade71b8d9a046a5c5581f38eb967018e3ab1a3 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/pagevec.h>
 
 #include "i915_request.h"
+#include "i915_reset.h"
 #include "i915_selftest.h"
 #include "i915_timeline.h"
 
@@ -288,6 +289,8 @@ struct i915_address_space {
        bool closed;
 
        struct mutex mutex; /* protects vma and our lists */
+#define VM_CLASS_GGTT 0
+#define VM_CLASS_PPGTT 1
 
        u64 scratch_pte;
        struct i915_page_dma scratch_page;
@@ -296,32 +299,12 @@ struct i915_address_space {
        struct i915_page_directory_pointer *scratch_pdp; /* GEN8+ & 48b PPGTT */
 
        /**
-        * List of objects currently involved in rendering.
-        *
-        * Includes buffers having the contents of their GPU caches
-        * flushed, not necessarily primitives. last_read_req
-        * represents when the rendering involved will be completed.
-        *
-        * A reference is held on the buffer while on this list.
+        * List of vma currently bound.
         */
-       struct list_head active_list;
+       struct list_head bound_list;
 
        /**
-        * LRU list of objects which are not in the ringbuffer and
-        * are ready to unbind, but are still in the GTT.
-        *
-        * last_read_req is NULL while an object is in this list.
-        *
-        * A reference is not held on the buffer while on this list,
-        * as merely being GTT-bound shouldn't prevent its being
-        * freed, and we'll pull it off the list in the free path.
-        */
-       struct list_head inactive_list;
-
-       /**
-        * List of vma that have been unbound.
-        *
-        * A reference is not held on the buffer while on this list.
+        * List of vma that are not bound.
         */
        struct list_head unbound_list;
 
@@ -413,8 +396,6 @@ struct i915_hw_ppgtt {
                struct i915_page_directory_pointer pdp; /* GEN8+ */
                struct i915_page_directory pd;          /* GEN6-7 */
        };
-
-       void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
 };
 
 struct gen6_hw_ppgtt {
@@ -661,19 +642,19 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
 
 /* Flags used by pin/bind&friends. */
 #define PIN_NONBLOCK           BIT_ULL(0)
-#define PIN_MAPPABLE           BIT_ULL(1)
-#define PIN_ZONE_4G            BIT_ULL(2)
-#define PIN_NONFAULT           BIT_ULL(3)
-#define PIN_NOEVICT            BIT_ULL(4)
-
-#define PIN_MBZ                        BIT_ULL(5) /* I915_VMA_PIN_OVERFLOW */
-#define PIN_GLOBAL             BIT_ULL(6) /* I915_VMA_GLOBAL_BIND */
-#define PIN_USER               BIT_ULL(7) /* I915_VMA_LOCAL_BIND */
-#define PIN_UPDATE             BIT_ULL(8)
-
-#define PIN_HIGH               BIT_ULL(9)
-#define PIN_OFFSET_BIAS                BIT_ULL(10)
-#define PIN_OFFSET_FIXED       BIT_ULL(11)
+#define PIN_NONFAULT           BIT_ULL(1)
+#define PIN_NOEVICT            BIT_ULL(2)
+#define PIN_MAPPABLE           BIT_ULL(3)
+#define PIN_ZONE_4G            BIT_ULL(4)
+#define PIN_HIGH               BIT_ULL(5)
+#define PIN_OFFSET_BIAS                BIT_ULL(6)
+#define PIN_OFFSET_FIXED       BIT_ULL(7)
+
+#define PIN_MBZ                        BIT_ULL(8) /* I915_VMA_PIN_OVERFLOW */
+#define PIN_GLOBAL             BIT_ULL(9) /* I915_VMA_GLOBAL_BIND */
+#define PIN_USER               BIT_ULL(10) /* I915_VMA_LOCAL_BIND */
+#define PIN_UPDATE             BIT_ULL(11)
+
 #define PIN_OFFSET_MASK                (-I915_GTT_PAGE_SIZE)
 
 #endif
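
The PIN_* renumbering above groups the flags by consumer: the low bits are hints for the allocator and eviction logic, while PIN_MBZ through PIN_UPDATE now sit at the bit positions of the I915_VMA_* flags named in the trailing comments. If that correspondence is meant to be exact, a build-time guard (hypothetical, not in this diff) inside any init function would pin it down:

	/* Hypothetical guard for the bit aliasing claimed by the comments. */
	BUILD_BUG_ON(PIN_MBZ    != I915_VMA_PIN_OVERFLOW);
	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
	BUILD_BUG_ON(PIN_USER   != I915_VMA_LOCAL_BIND);
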
index 0d0144b2104cb3264b2048a1569bc598fde532f3..fddde1033e747ee65f0914114975f9d8d29c88de 100644 (file)
@@ -22,7 +22,6 @@
  *
  */
 
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
index a6dd7c46de0dddb4a8f2c09a21651ac1c1a8cb21..fab040331cdb250d44b5791833da357fa5c82206 100644 (file)
@@ -29,7 +29,8 @@
 
 #include <drm/drm_vma_manager.h>
 #include <drm/drm_gem.h>
-#include <drm/drmP.h>
+#include <drm/drm_file.h>
+#include <drm/drm_device.h>
 
 #include <drm/i915_drm.h>
 
@@ -56,6 +57,7 @@ struct drm_i915_gem_object_ops {
 #define I915_GEM_OBJECT_HAS_STRUCT_PAGE        BIT(0)
 #define I915_GEM_OBJECT_IS_SHRINKABLE  BIT(1)
 #define I915_GEM_OBJECT_IS_PROXY       BIT(2)
+#define I915_GEM_OBJECT_ASYNC_CANCEL   BIT(3)
 
        /* Interface between the GEM object and its backing storage.
         * get_pages() is called once prior to the use of the associated set
@@ -85,24 +87,33 @@ struct drm_i915_gem_object {
 
        const struct drm_i915_gem_object_ops *ops;
 
-       /**
-        * @vma_list: List of VMAs backed by this object
-        *
-        * The VMA on this list are ordered by type, all GGTT vma are placed
-        * at the head and all ppGTT vma are placed at the tail. The different
-        * types of GGTT vma are unordered between themselves, use the
-        * @vma_tree (which has a defined order between all VMA) to find an
-        * exact match.
-        */
-       struct list_head vma_list;
-       /**
-        * @vma_tree: Ordered tree of VMAs backed by this object
-        *
-        * All VMA created for this object are placed in the @vma_tree for
-        * fast retrieval via a binary search in i915_vma_instance().
-        * They are also added to @vma_list for easy iteration.
-        */
-       struct rb_root vma_tree;
+       struct {
+               /**
+                * @vma.lock: protect the list/tree of vmas
+                */
+               spinlock_t lock;
+
+               /**
+                * @vma.list: List of VMAs backed by this object
+                *
+                * The VMAs on this list are ordered by type: all GGTT vma are
+                * placed at the head and all ppGTT vma are placed at the tail.
+                * The different types of GGTT vma are unordered between
+                * themselves; use the @vma.tree (which has a defined order
+                * between all VMA) to quickly find an exact match.
+                */
+               struct list_head list;
+
+               /**
+                * @vma.tree: Ordered tree of VMAs backed by this object
+                *
+                * All VMA created for this object are placed in the @vma.tree
+                * for fast retrieval via a binary search in
+                * i915_vma_instance(). They are also added to @vma.list for
+                * easy iteration.
+                */
+               struct rb_root tree;
+       } vma;
 
        /**
         * @lut_list: List of vma lookup entries in use for this object.
@@ -164,7 +175,7 @@ struct drm_i915_gem_object {
 
        atomic_t frontbuffer_bits;
        unsigned int frontbuffer_ggtt_origin; /* write once */
-       struct i915_gem_active frontbuffer_write;
+       struct i915_active_request frontbuffer_write;
 
        /** Current tiling stride for the object, if it's tiled. */
        unsigned int tiling_and_stride;
@@ -386,6 +397,12 @@ i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
        return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY;
 }
 
+static inline bool
+i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
+{
+       return obj->ops->flags & I915_GEM_OBJECT_ASYNC_CANCEL;
+}
+
 static inline bool
 i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
 {
index ea90d3a0d51143dc4a189b0e15a3ecf7c37c214c..6da795c7e62e47a12207cca861efafb1ea0f45f3 100644 (file)
 #include <linux/pci.h>
 #include <linux/dma-buf.h>
 #include <linux/vmalloc.h>
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 
 #include "i915_drv.h"
 #include "i915_trace.h"
 
-static bool shrinker_lock(struct drm_i915_private *i915, bool *unlock)
+static bool shrinker_lock(struct drm_i915_private *i915,
+                         unsigned int flags,
+                         bool *unlock)
 {
-       switch (mutex_trylock_recursive(&i915->drm.struct_mutex)) {
+       struct mutex *m = &i915->drm.struct_mutex;
+
+       switch (mutex_trylock_recursive(m)) {
        case MUTEX_TRYLOCK_RECURSIVE:
                *unlock = false;
                return true;
 
        case MUTEX_TRYLOCK_FAILED:
                *unlock = false;
-               preempt_disable();
-               do {
-                       cpu_relax();
-                       if (mutex_trylock(&i915->drm.struct_mutex)) {
-                               *unlock = true;
-                               break;
-                       }
-               } while (!need_resched());
-               preempt_enable();
+               if (flags & I915_SHRINK_ACTIVE &&
+                   mutex_lock_killable_nested(m, I915_MM_SHRINKER) == 0)
+                       *unlock = true;
                return *unlock;
 
        case MUTEX_TRYLOCK_SUCCESS:
@@ -156,11 +153,12 @@ i915_gem_shrink(struct drm_i915_private *i915,
                { &i915->mm.bound_list, I915_SHRINK_BOUND },
                { NULL, 0 },
        }, *phase;
+       intel_wakeref_t wakeref = 0;
        unsigned long count = 0;
        unsigned long scanned = 0;
        bool unlock;
 
-       if (!shrinker_lock(i915, &unlock))
+       if (!shrinker_lock(i915, flags, &unlock))
                return 0;
 
        /*
@@ -185,9 +183,11 @@ i915_gem_shrink(struct drm_i915_private *i915,
         * device just to recover a little memory. If absolutely necessary,
         * we will force the wake during oom-notifier.
         */
-       if ((flags & I915_SHRINK_BOUND) &&
-           !intel_runtime_pm_get_if_in_use(i915))
-               flags &= ~I915_SHRINK_BOUND;
+       if (flags & I915_SHRINK_BOUND) {
+               wakeref = intel_runtime_pm_get_if_in_use(i915);
+               if (!wakeref)
+                       flags &= ~I915_SHRINK_BOUND;
+       }
 
        /*
         * As we may completely rewrite the (un)bound list whilst unbinding
@@ -268,7 +268,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
        }
 
        if (flags & I915_SHRINK_BOUND)
-               intel_runtime_pm_put(i915);
+               intel_runtime_pm_put(i915, wakeref);
 
        i915_retire_requests(i915);
 
@@ -295,14 +295,15 @@ i915_gem_shrink(struct drm_i915_private *i915,
  */
 unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
 {
-       unsigned long freed;
-
-       intel_runtime_pm_get(i915);
-       freed = i915_gem_shrink(i915, -1UL, NULL,
-                               I915_SHRINK_BOUND |
-                               I915_SHRINK_UNBOUND |
-                               I915_SHRINK_ACTIVE);
-       intel_runtime_pm_put(i915);
+       intel_wakeref_t wakeref;
+       unsigned long freed = 0;
+
+       with_intel_runtime_pm(i915, wakeref) {
+               freed = i915_gem_shrink(i915, -1UL, NULL,
+                                       I915_SHRINK_BOUND |
+                                       I915_SHRINK_UNBOUND |
+                                       I915_SHRINK_ACTIVE);
+       }
 
        return freed;
 }
@@ -357,7 +358,7 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 
        sc->nr_scanned = 0;
 
-       if (!shrinker_lock(i915, &unlock))
+       if (!shrinker_lock(i915, 0, &unlock))
                return SHRINK_STOP;
 
        freed = i915_gem_shrink(i915,
@@ -373,14 +374,16 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
                                         I915_SHRINK_BOUND |
                                         I915_SHRINK_UNBOUND);
        if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
-               intel_runtime_pm_get(i915);
-               freed += i915_gem_shrink(i915,
-                                        sc->nr_to_scan - sc->nr_scanned,
-                                        &sc->nr_scanned,
-                                        I915_SHRINK_ACTIVE |
-                                        I915_SHRINK_BOUND |
-                                        I915_SHRINK_UNBOUND);
-               intel_runtime_pm_put(i915);
+               intel_wakeref_t wakeref;
+
+               with_intel_runtime_pm(i915, wakeref) {
+                       freed += i915_gem_shrink(i915,
+                                                sc->nr_to_scan - sc->nr_scanned,
+                                                &sc->nr_scanned,
+                                                I915_SHRINK_ACTIVE |
+                                                I915_SHRINK_BOUND |
+                                                I915_SHRINK_UNBOUND);
+               }
        }
 
        shrinker_unlock(i915, unlock);
@@ -388,31 +391,6 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
        return sc->nr_scanned ? freed : SHRINK_STOP;
 }
 
-static bool
-shrinker_lock_uninterruptible(struct drm_i915_private *i915, bool *unlock,
-                             int timeout_ms)
-{
-       unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);
-
-       do {
-               if (i915_gem_wait_for_idle(i915,
-                                          0, MAX_SCHEDULE_TIMEOUT) == 0 &&
-                   shrinker_lock(i915, unlock))
-                       break;
-
-               schedule_timeout_killable(1);
-               if (fatal_signal_pending(current))
-                       return false;
-
-               if (time_after(jiffies, timeout)) {
-                       pr_err("Unable to lock GPU to purge memory.\n");
-                       return false;
-               }
-       } while (1);
-
-       return true;
-}
-
 static int
 i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 {
@@ -420,8 +398,13 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
                container_of(nb, struct drm_i915_private, mm.oom_notifier);
        struct drm_i915_gem_object *obj;
        unsigned long unevictable, bound, unbound, freed_pages;
+       intel_wakeref_t wakeref;
 
-       freed_pages = i915_gem_shrink_all(i915);
+       freed_pages = 0;
+       with_intel_runtime_pm(i915, wakeref)
+               freed_pages += i915_gem_shrink(i915, -1UL, NULL,
+                                              I915_SHRINK_BOUND |
+                                              I915_SHRINK_UNBOUND);
 
        /* Because we may be allocating inside our own driver, we cannot
         * assert that there are no objects with pinned pages that are not
@@ -447,10 +430,6 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
                pr_info("Purging GPU memory, %lu pages freed, "
                        "%lu pages still pinned.\n",
                        freed_pages, unevictable);
-       if (unbound || bound)
-               pr_err("%lu and %lu pages still available in the "
-                      "bound and unbound GPU page lists.\n",
-                      bound, unbound);
 
        *(unsigned long *)ptr += freed_pages;
        return NOTIFY_DONE;
@@ -463,34 +442,39 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
                container_of(nb, struct drm_i915_private, mm.vmap_notifier);
        struct i915_vma *vma, *next;
        unsigned long freed_pages = 0;
+       intel_wakeref_t wakeref;
        bool unlock;
-       int ret;
 
-       if (!shrinker_lock_uninterruptible(i915, &unlock, 5000))
+       if (!shrinker_lock(i915, 0, &unlock))
                return NOTIFY_DONE;
 
        /* Force everything onto the inactive lists */
-       ret = i915_gem_wait_for_idle(i915,
-                                    I915_WAIT_LOCKED,
-                                    MAX_SCHEDULE_TIMEOUT);
-       if (ret)
+       if (i915_gem_wait_for_idle(i915,
+                                  I915_WAIT_LOCKED,
+                                  MAX_SCHEDULE_TIMEOUT))
                goto out;
 
-       intel_runtime_pm_get(i915);
-       freed_pages += i915_gem_shrink(i915, -1UL, NULL,
-                                      I915_SHRINK_BOUND |
-                                      I915_SHRINK_UNBOUND |
-                                      I915_SHRINK_ACTIVE |
-                                      I915_SHRINK_VMAPS);
-       intel_runtime_pm_put(i915);
+       with_intel_runtime_pm(i915, wakeref)
+               freed_pages += i915_gem_shrink(i915, -1UL, NULL,
+                                              I915_SHRINK_BOUND |
+                                              I915_SHRINK_UNBOUND |
+                                              I915_SHRINK_VMAPS);
 
        /* We also want to clear any cached iomaps as they wrap vmap */
+       mutex_lock(&i915->ggtt.vm.mutex);
        list_for_each_entry_safe(vma, next,
-                                &i915->ggtt.vm.inactive_list, vm_link) {
+                                &i915->ggtt.vm.bound_list, vm_link) {
                unsigned long count = vma->node.size >> PAGE_SHIFT;
-               if (vma->iomap && i915_vma_unbind(vma) == 0)
+
+               if (!vma->iomap || i915_vma_is_active(vma))
+                       continue;
+
+               mutex_unlock(&i915->ggtt.vm.mutex);
+               if (i915_vma_unbind(vma) == 0)
                        freed_pages += count;
+               mutex_lock(&i915->ggtt.vm.mutex);
        }
+       mutex_unlock(&i915->ggtt.vm.mutex);
 
 out:
        shrinker_unlock(i915, unlock);
@@ -533,13 +517,40 @@ void i915_gem_shrinker_unregister(struct drm_i915_private *i915)
        unregister_shrinker(&i915->mm.shrinker);
 }
 
-void i915_gem_shrinker_taints_mutex(struct mutex *mutex)
+void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
+                                   struct mutex *mutex)
 {
+       bool unlock = false;
+
        if (!IS_ENABLED(CONFIG_LOCKDEP))
                return;
 
+       if (!lockdep_is_held_type(&i915->drm.struct_mutex, -1)) {
+               mutex_acquire(&i915->drm.struct_mutex.dep_map,
+                             I915_MM_NORMAL, 0, _RET_IP_);
+               unlock = true;
+       }
+
        fs_reclaim_acquire(GFP_KERNEL);
-       mutex_lock(mutex);
-       mutex_unlock(mutex);
+
+       /*
+        * As we invariably rely on the struct_mutex within the shrinker,
+        * but have a complicated recursion dance, taint all the mutexes used
+        * within the shrinker with the struct_mutex. For completeness, we
+        * taint with all subclasses of struct_mutex, even though we should
+        * only need tainting by I915_MM_NORMAL to catch possible ABBA
+        * deadlocks from using struct_mutex inside @mutex.
+        */
+       mutex_acquire(&i915->drm.struct_mutex.dep_map,
+                     I915_MM_SHRINKER, 0, _RET_IP_);
+
+       mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
+       mutex_release(&mutex->dep_map, 0, _RET_IP_);
+
+       mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);
+
        fs_reclaim_release(GFP_KERNEL);
+
+       if (unlock)
+               mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);
 }
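
Nothing in the rewritten helper above contends for a lock: the mutex_acquire()/mutex_release() calls operate on the lockdep dep_map alone, so the function merely replays the ordering fs_reclaim -> struct_mutex (I915_MM_SHRINKER subclass) -> @mutex once, at init time, for lockdep to record. Stripped of the i915 plumbing, the trick is:

	/* Prime lockdep with a lock ordering without ever blocking: walk the
	 * acquire/release sequence the shrinker would perform for real, so a
	 * later inversion is reported at the offending call site rather than
	 * on a rare reclaim path. */
	fs_reclaim_acquire(GFP_KERNEL);
	mutex_acquire(&outer->dep_map, subclass, 0, _RET_IP_);
	mutex_acquire(&inner->dep_map, 0, 0, _RET_IP_);
	mutex_release(&inner->dep_map, 0, _RET_IP_);
	mutex_release(&outer->dep_map, 0, _RET_IP_);
	fs_reclaim_release(GFP_KERNEL);
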
index f29a7ff7c362c7d21e60c1d822465cfe6a3d2090..74a9661479ca54b8c04e6141e14d992585aa0d52 100644 (file)
@@ -26,7 +26,6 @@
  *
  */
 
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
@@ -102,7 +101,7 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
                resource_size_t ggtt_start;
 
                ggtt_start = I915_READ(PGTBL_CTL);
-               if (IS_GEN4(dev_priv))
+               if (IS_GEN(dev_priv, 4))
                        ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
                                     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
                else
@@ -156,7 +155,7 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
                 * GEN3 firmware likes to smash pci bridges into the stolen
                 * range. Apparently this works.
                 */
-               if (r == NULL && !IS_GEN3(dev_priv)) {
+               if (r == NULL && !IS_GEN(dev_priv, 3)) {
                        DRM_ERROR("conflict detected with stolen region: %pR\n",
                                  dsm);
 
@@ -194,7 +193,8 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
         * Whether ILK really reuses the ELK register for this is unclear.
         * Let's see if we catch anyone with this supposedly enabled on ILK.
         */
-       WARN(IS_GEN5(dev_priv), "ILK stolen reserved found? 0x%08x\n", reg_val);
+       WARN(IS_GEN(dev_priv, 5), "ILK stolen reserved found? 0x%08x\n",
+            reg_val);
 
        if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
                return;
@@ -701,7 +701,10 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
        vma->pages = obj->mm.pages;
        vma->flags |= I915_VMA_GLOBAL_BIND;
        __i915_vma_set_map_and_fenceable(vma);
-       list_move_tail(&vma->vm_link, &ggtt->vm.inactive_list);
+
+       mutex_lock(&ggtt->vm.mutex);
+       list_move_tail(&vma->vm_link, &ggtt->vm.bound_list);
+       mutex_unlock(&ggtt->vm.mutex);
 
        spin_lock(&dev_priv->mm.obj_lock);
        list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
index d9dc9df523b58e9a8645838e9c2ffa2080c54d61..16cc9ddbce34ae8e97e59838c2e687af6a0f52f9 100644 (file)
@@ -27,7 +27,6 @@
 
 #include <linux/string.h>
 #include <linux/bitops.h>
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
@@ -87,7 +86,7 @@ u32 i915_gem_fence_size(struct drm_i915_private *i915,
        }
 
        /* Previous chips need a power-of-two fence region when tiling */
-       if (IS_GEN3(i915))
+       if (IS_GEN(i915, 3))
                ggtt_size = 1024*1024;
        else
                ggtt_size = 512*1024;
@@ -162,7 +161,7 @@ i915_tiling_ok(struct drm_i915_gem_object *obj,
                        return false;
        }
 
-       if (IS_GEN2(i915) ||
+       if (IS_GEN(i915, 2) ||
            (tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(i915)))
                tile_width = 128;
        else
index 9558582c105ec4953ba0616b23966000a095cfab..1d3f9a31ad61921c17923029712f7a943e664bd6 100644 (file)
@@ -22,7 +22,6 @@
  *
  */
 
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 #include "i915_trace.h"
@@ -50,77 +49,67 @@ struct i915_mmu_notifier {
        struct hlist_node node;
        struct mmu_notifier mn;
        struct rb_root_cached objects;
-       struct workqueue_struct *wq;
+       struct i915_mm_struct *mm;
 };
 
 struct i915_mmu_object {
        struct i915_mmu_notifier *mn;
        struct drm_i915_gem_object *obj;
        struct interval_tree_node it;
-       struct list_head link;
-       struct work_struct work;
-       bool attached;
 };
 
-static void cancel_userptr(struct work_struct *work)
+static void add_object(struct i915_mmu_object *mo)
 {
-       struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
-       struct drm_i915_gem_object *obj = mo->obj;
-       struct work_struct *active;
-
-       /* Cancel any active worker and force us to re-evaluate gup */
-       mutex_lock(&obj->mm.lock);
-       active = fetch_and_zero(&obj->userptr.work);
-       mutex_unlock(&obj->mm.lock);
-       if (active)
-               goto out;
-
-       i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);
-
-       mutex_lock(&obj->base.dev->struct_mutex);
-
-       /* We are inside a kthread context and can't be interrupted */
-       if (i915_gem_object_unbind(obj) == 0)
-               __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
-       WARN_ONCE(i915_gem_object_has_pages(obj),
-                 "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_global=%d\n",
-                 obj->bind_count,
-                 atomic_read(&obj->mm.pages_pin_count),
-                 obj->pin_global);
-
-       mutex_unlock(&obj->base.dev->struct_mutex);
-
-out:
-       i915_gem_object_put(obj);
+       GEM_BUG_ON(!RB_EMPTY_NODE(&mo->it.rb));
+       interval_tree_insert(&mo->it, &mo->mn->objects);
 }
 
-static void add_object(struct i915_mmu_object *mo)
+static void del_object(struct i915_mmu_object *mo)
 {
-       if (mo->attached)
+       if (RB_EMPTY_NODE(&mo->it.rb))
                return;
 
-       interval_tree_insert(&mo->it, &mo->mn->objects);
-       mo->attached = true;
+       interval_tree_remove(&mo->it, &mo->mn->objects);
+       RB_CLEAR_NODE(&mo->it.rb);
 }
 
-static void del_object(struct i915_mmu_object *mo)
+static void
+__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value)
 {
-       if (!mo->attached)
+       struct i915_mmu_object *mo = obj->userptr.mmu_object;
+
+       /*
+        * During mm_invalidate_range we need to cancel any userptr that
+        * overlaps the range being invalidated. Doing so requires the
+        * struct_mutex, and that risks recursion. In order to cause
+        * recursion, the user must alias the userptr address space with
+        * a GTT mmapping (possible with a MAP_FIXED) - then when we have
+        * to invalidate that mmapping, mm_invalidate_range is called with
+        * the userptr address *and* the struct_mutex held.  To prevent that
+        * we set a flag under the i915_mmu_notifier spinlock to indicate
+        * whether this object is valid.
+        */
+       if (!mo)
                return;
 
-       interval_tree_remove(&mo->it, &mo->mn->objects);
-       mo->attached = false;
+       spin_lock(&mo->mn->lock);
+       if (value)
+               add_object(mo);
+       else
+               del_object(mo);
+       spin_unlock(&mo->mn->lock);
 }
 
-static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
-                       const struct mmu_notifier_range *range)
+static int
+userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
+                                 const struct mmu_notifier_range *range)
 {
        struct i915_mmu_notifier *mn =
                container_of(_mn, struct i915_mmu_notifier, mn);
-       struct i915_mmu_object *mo;
        struct interval_tree_node *it;
-       LIST_HEAD(cancelled);
+       struct mutex *unlock = NULL;
        unsigned long end;
+       int ret = 0;
 
        if (RB_EMPTY_ROOT(&mn->objects.rb_root))
                return 0;
@@ -131,11 +120,15 @@ static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
        spin_lock(&mn->lock);
        it = interval_tree_iter_first(&mn->objects, range->start, end);
        while (it) {
+               struct drm_i915_gem_object *obj;
+
                if (!range->blockable) {
-                       spin_unlock(&mn->lock);
-                       return -EAGAIN;
+                       ret = -EAGAIN;
+                       break;
                }
-               /* The mmu_object is released late when destroying the
+
+               /*
+                * The mmu_object is released late when destroying the
                 * GEM object so it is entirely possible to gain a
                 * reference on an object in the process of being freed
                 * since our serialisation is via the spinlock and not
@@ -144,29 +137,65 @@ static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
                 * use-after-free we only acquire a reference on the
                 * object if it is not in the process of being destroyed.
                 */
-               mo = container_of(it, struct i915_mmu_object, it);
-               if (kref_get_unless_zero(&mo->obj->base.refcount))
-                       queue_work(mn->wq, &mo->work);
+               obj = container_of(it, struct i915_mmu_object, it)->obj;
+               if (!kref_get_unless_zero(&obj->base.refcount)) {
+                       it = interval_tree_iter_next(it, range->start, end);
+                       continue;
+               }
+               spin_unlock(&mn->lock);
+
+               if (!unlock) {
+                       unlock = &mn->mm->i915->drm.struct_mutex;
+
+                       switch (mutex_trylock_recursive(unlock)) {
+                       default:
+                       case MUTEX_TRYLOCK_FAILED:
+                               if (mutex_lock_killable_nested(unlock, I915_MM_SHRINKER)) {
+                                       i915_gem_object_put(obj);
+                                       return -EINTR;
+                               }
+                               /* fall through */
+                       case MUTEX_TRYLOCK_SUCCESS:
+                               break;
+
+                       case MUTEX_TRYLOCK_RECURSIVE:
+                               unlock = ERR_PTR(-EEXIST);
+                               break;
+                       }
+               }
+
+               ret = i915_gem_object_unbind(obj);
+               if (ret == 0)
+                       ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
+               i915_gem_object_put(obj);
+               if (ret)
+                       goto unlock;
 
-               list_add(&mo->link, &cancelled);
-               it = interval_tree_iter_next(it, range->start, end);
+               spin_lock(&mn->lock);
+
+               /*
+                * As we do not (yet) protect the mmu from concurrent insertion
+                * over this range, there is no guarantee that this search will
+                * terminate given a pathological workload.
+                */
+               it = interval_tree_iter_first(&mn->objects, range->start, end);
        }
-       list_for_each_entry(mo, &cancelled, link)
-               del_object(mo);
        spin_unlock(&mn->lock);
 
-       if (!list_empty(&cancelled))
-               flush_workqueue(mn->wq);
+unlock:
+       if (!IS_ERR_OR_NULL(unlock))
+               mutex_unlock(unlock);
+
+       return ret;
 
-       return 0;
 }
 
 static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
-       .invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
+       .invalidate_range_start = userptr_mn_invalidate_range_start,
 };
 
 static struct i915_mmu_notifier *
-i915_mmu_notifier_create(struct mm_struct *mm)
+i915_mmu_notifier_create(struct i915_mm_struct *mm)
 {
        struct i915_mmu_notifier *mn;
 
@@ -177,13 +206,7 @@ i915_mmu_notifier_create(struct mm_struct *mm)
        spin_lock_init(&mn->lock);
        mn->mn.ops = &i915_gem_userptr_notifier;
        mn->objects = RB_ROOT_CACHED;
-       mn->wq = alloc_workqueue("i915-userptr-release",
-                                WQ_UNBOUND | WQ_MEM_RECLAIM,
-                                0);
-       if (mn->wq == NULL) {
-               kfree(mn);
-               return ERR_PTR(-ENOMEM);
-       }
+       mn->mm = mm;
 
        return mn;
 }
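
For readers outside mm/: registering such a notifier hooks the process address space, and invalidate_range_start() fires whenever the kernel is about to tear down PTEs in a tracked range (munmap, reclaim, migration and so on). A minimal skeleton against the 5.0-era API, with demo names:

	static int demo_invalidate_range_start(struct mmu_notifier *mn,
					       const struct mmu_notifier_range *range)
	{
		if (!range->blockable)
			return -EAGAIN;	/* not allowed to sleep here */

		/* drop anything overlapping [range->start, range->end) */
		return 0;
	}

	static const struct mmu_notifier_ops demo_ops = {
		.invalidate_range_start = demo_invalidate_range_start,
	};

	int demo_track(struct mmu_notifier *mn, struct mm_struct *mm)
	{
		mn->ops = &demo_ops;
		return mmu_notifier_register(mn, mm); /* pair with _unregister() */
	}
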
@@ -193,16 +216,14 @@ i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
 {
        struct i915_mmu_object *mo;
 
-       mo = obj->userptr.mmu_object;
-       if (mo == NULL)
+       mo = fetch_and_zero(&obj->userptr.mmu_object);
+       if (!mo)
                return;
 
        spin_lock(&mo->mn->lock);
        del_object(mo);
        spin_unlock(&mo->mn->lock);
        kfree(mo);
-
-       obj->userptr.mmu_object = NULL;
 }
 
 static struct i915_mmu_notifier *
@@ -215,7 +236,7 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
        if (mn)
                return mn;
 
-       mn = i915_mmu_notifier_create(mm->mm);
+       mn = i915_mmu_notifier_create(mm);
        if (IS_ERR(mn))
                err = PTR_ERR(mn);
 
@@ -238,10 +259,8 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
        mutex_unlock(&mm->i915->mm_lock);
        up_write(&mm->mm->mmap_sem);
 
-       if (mn && !IS_ERR(mn)) {
-               destroy_workqueue(mn->wq);
+       if (mn && !IS_ERR(mn))
                kfree(mn);
-       }
 
        return err ? ERR_PTR(err) : mm->mn;
 }
@@ -264,14 +283,14 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
                return PTR_ERR(mn);
 
        mo = kzalloc(sizeof(*mo), GFP_KERNEL);
-       if (mo == NULL)
+       if (!mo)
                return -ENOMEM;
 
        mo->mn = mn;
        mo->obj = obj;
        mo->it.start = obj->userptr.ptr;
        mo->it.last = obj->userptr.ptr + obj->base.size - 1;
-       INIT_WORK(&mo->work, cancel_userptr);
+       RB_CLEAR_NODE(&mo->it.rb);
 
        obj->userptr.mmu_object = mo;
        return 0;
@@ -285,12 +304,16 @@ i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
                return;
 
        mmu_notifier_unregister(&mn->mn, mm);
-       destroy_workqueue(mn->wq);
        kfree(mn);
 }
 
 #else
 
+static void
+__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value)
+{
+}
+
 static void
 i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
 {
@@ -459,42 +482,6 @@ alloc_table:
        return st;
 }
 
-static int
-__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
-                             bool value)
-{
-       int ret = 0;
-
-       /* During mm_invalidate_range we need to cancel any userptr that
-        * overlaps the range being invalidated. Doing so requires the
-        * struct_mutex, and that risks recursion. In order to cause
-        * recursion, the user must alias the userptr address space with
-        * a GTT mmapping (possible with a MAP_FIXED) - then when we have
-        * to invalidate that mmaping, mm_invalidate_range is called with
-        * the userptr address *and* the struct_mutex held.  To prevent that
-        * we set a flag under the i915_mmu_notifier spinlock to indicate
-        * whether this object is valid.
-        */
-#if defined(CONFIG_MMU_NOTIFIER)
-       if (obj->userptr.mmu_object == NULL)
-               return 0;
-
-       spin_lock(&obj->userptr.mmu_object->mn->lock);
-       /* In order to serialise get_pages with an outstanding
-        * cancel_userptr, we must drop the struct_mutex and try again.
-        */
-       if (!value)
-               del_object(obj->userptr.mmu_object);
-       else if (!work_pending(&obj->userptr.mmu_object->work))
-               add_object(obj->userptr.mmu_object);
-       else
-               ret = -EAGAIN;
-       spin_unlock(&obj->userptr.mmu_object->mn->lock);
-#endif
-
-       return ret;
-}
-
 static void
 __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 {
@@ -680,8 +667,11 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
        struct sgt_iter sgt_iter;
        struct page *page;
 
-       BUG_ON(obj->userptr.work != NULL);
+       /* Cancel any in-flight work and force it to restart its gup */
+       obj->userptr.work = NULL;
        __i915_gem_userptr_set_active(obj, false);
+       if (!pages)
+               return;
 
        if (obj->mm.madv != I915_MADV_WILLNEED)
                obj->mm.dirty = false;
@@ -719,7 +709,8 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
 
 static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
        .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
-                I915_GEM_OBJECT_IS_SHRINKABLE,
+                I915_GEM_OBJECT_IS_SHRINKABLE |
+                I915_GEM_OBJECT_ASYNC_CANCEL,
        .get_pages = i915_gem_userptr_get_pages,
        .put_pages = i915_gem_userptr_put_pages,
        .dmabuf_export = i915_gem_userptr_dmabuf_export,
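
The new I915_GEM_OBJECT_ASYNC_CANCEL bit here pairs with the i915_gem_object_needs_async_cancel() helper added to i915_gem_object.h earlier in this diff: it marks objects whose ->put_pages() may be called with pages == NULL purely to cancel an in-flight get_pages worker, which is why i915_gem_userptr_put_pages() above gained its early "if (!pages) return". The caller side is not shown in this diff; presumably the core put-pages path lets the NULL through for flagged objects, along these lines:

	/* Assumed caller shape, not part of this diff: for ASYNC_CANCEL
	 * objects, call into the backend even with no pages to release so
	 * that it can cancel its worker. */
	pages = fetch_and_zero(&obj->mm.pages);
	if (IS_ERR_OR_NULL(pages) && !i915_gem_object_needs_async_cancel(obj))
		return;	/* nothing to free and nothing to cancel */

	obj->ops->put_pages(obj, IS_ERR(pages) ? NULL : pages);
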
index 07465123c1663c61818fc1b63c1c04cab782ba06..9a65341fec097e500ace05a410b62f6f19390d21 100644 (file)
@@ -447,9 +447,14 @@ static void error_print_request(struct drm_i915_error_state_buf *m,
        if (!erq->seqno)
                return;
 
-       err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, prio %d, emitted %dms, start %08x, head %08x, tail %08x\n",
+       err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x%s%s, prio %d, emitted %dms, start %08x, head %08x, tail %08x\n",
                   prefix, erq->pid, erq->ban_score,
-                  erq->context, erq->seqno, erq->sched_attr.priority,
+                  erq->context, erq->seqno,
+                  test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+                           &erq->flags) ? "!" : "",
+                  test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+                           &erq->flags) ? "+" : "",
+                  erq->sched_attr.priority,
                   jiffies_to_msecs(erq->jiffies - epoch),
                   erq->start, erq->head, erq->tail);
 }
@@ -530,13 +535,9 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
        }
        err_printf(m, "  seqno: 0x%08x\n", ee->seqno);
        err_printf(m, "  last_seqno: 0x%08x\n", ee->last_seqno);
-       err_printf(m, "  waiting: %s\n", yesno(ee->waiting));
        err_printf(m, "  ring->head: 0x%08x\n", ee->cpu_ring_head);
        err_printf(m, "  ring->tail: 0x%08x\n", ee->cpu_ring_tail);
-       err_printf(m, "  hangcheck stall: %s\n", yesno(ee->hangcheck_stalled));
-       err_printf(m, "  hangcheck action: %s\n",
-                  hangcheck_action_to_str(ee->hangcheck_action));
-       err_printf(m, "  hangcheck action timestamp: %dms (%lu%s)\n",
+       err_printf(m, "  hangcheck timestamp: %dms (%lu%s)\n",
                   jiffies_to_msecs(ee->hangcheck_timestamp - epoch),
                   ee->hangcheck_timestamp,
                   ee->hangcheck_timestamp == epoch ? "; epoch" : "");
@@ -594,13 +595,14 @@ static void print_error_obj(struct drm_i915_error_state_buf *m,
 
 static void err_print_capabilities(struct drm_i915_error_state_buf *m,
                                   const struct intel_device_info *info,
+                                  const struct intel_runtime_info *runtime,
                                   const struct intel_driver_caps *caps)
 {
        struct drm_printer p = i915_error_printer(m);
 
        intel_device_info_dump_flags(info, &p);
        intel_driver_caps_print(caps, &p);
-       intel_device_info_dump_topology(&info->sseu, &p);
+       intel_device_info_dump_topology(&runtime->sseu, &p);
 }
 
 static void err_print_params(struct drm_i915_error_state_buf *m,
@@ -664,7 +666,9 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
 
        if (*error->error_msg)
                err_printf(m, "%s\n", error->error_msg);
-       err_printf(m, "Kernel: %s\n", init_utsname()->release);
+       err_printf(m, "Kernel: %s %s\n",
+                  init_utsname()->release,
+                  init_utsname()->machine);
        ts = ktime_to_timespec64(error->time);
        err_printf(m, "Time: %lld s %ld us\n",
                   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
@@ -681,15 +685,15 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
                   jiffies_to_msecs(error->capture - error->epoch));
 
        for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
-               if (error->engine[i].hangcheck_stalled &&
-                   error->engine[i].context.pid) {
-                       err_printf(m, "Active process (on ring %s): %s [%d], score %d%s\n",
-                                  engine_name(m->i915, i),
-                                  error->engine[i].context.comm,
-                                  error->engine[i].context.pid,
-                                  error->engine[i].context.ban_score,
-                                  bannable(&error->engine[i].context));
-               }
+               if (!error->engine[i].context.pid)
+                       continue;
+
+               err_printf(m, "Active process (on ring %s): %s [%d], score %d%s\n",
+                          engine_name(m->i915, i),
+                          error->engine[i].context.comm,
+                          error->engine[i].context.pid,
+                          error->engine[i].context.ban_score,
+                          bannable(&error->engine[i].context));
        }
        err_printf(m, "Reset count: %u\n", error->reset_count);
        err_printf(m, "Suspend count: %u\n", error->suspend_count);
@@ -719,8 +723,6 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
        err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
        err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
        err_printf(m, "CCID: 0x%08x\n", error->ccid);
-       err_printf(m, "Missed interrupts: 0x%08lx\n",
-                  m->i915->gpu_error.missed_irq_rings);
 
        for (i = 0; i < error->nfence; i++)
                err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
@@ -735,7 +737,7 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
                err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
        }
 
-       if (IS_GEN7(m->i915))
+       if (IS_GEN(m->i915, 7))
                err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
 
        for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
@@ -804,21 +806,6 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
                                                    error->epoch);
                }
 
-               if (IS_ERR(ee->waiters)) {
-                       err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n",
-                                  m->i915->engine[i]->name);
-               } else if (ee->num_waiters) {
-                       err_printf(m, "%s --- %d waiters\n",
-                                  m->i915->engine[i]->name,
-                                  ee->num_waiters);
-                       for (j = 0; j < ee->num_waiters; j++) {
-                               err_printf(m, " seqno 0x%08x for %s [%d]\n",
-                                          ee->waiters[j].seqno,
-                                          ee->waiters[j].comm,
-                                          ee->waiters[j].pid);
-                       }
-               }
-
                print_error_obj(m, m->i915->engine[i],
                                "ringbuffer", ee->ringbuffer);
 
@@ -844,7 +831,8 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
        if (error->display)
                intel_display_print_error_state(m, error->display);
 
-       err_print_capabilities(m, &error->device_info, &error->driver_caps);
+       err_print_capabilities(m, &error->device_info, &error->runtime_info,
+                              &error->driver_caps);
        err_print_params(m, &error->params);
        err_print_uc(m, &error->uc);
 }
@@ -963,17 +951,10 @@ static void i915_error_object_free(struct drm_i915_error_object *obj)
        kfree(obj);
 }
 
-static __always_inline void free_param(const char *type, void *x)
-{
-       if (!__builtin_strcmp(type, "char *"))
-               kfree(*(void **)x);
-}
 
 static void cleanup_params(struct i915_gpu_state *error)
 {
-#define FREE(T, x, ...) free_param(#T, &error->params.x);
-       I915_PARAMS_FOR_EACH(FREE);
-#undef FREE
+       i915_params_free(&error->params);
 }
 
 static void cleanup_uc_state(struct i915_gpu_state *error)
@@ -1006,8 +987,6 @@ void __i915_gpu_state_free(struct kref *error_ref)
                i915_error_object_free(ee->wa_ctx);
 
                kfree(ee->requests);
-               if (!IS_ERR_OR_NULL(ee->waiters))
-                       kfree(ee->waiters);
        }
 
        for (i = 0; i < ARRAY_SIZE(error->active_bo); i++)
@@ -1037,7 +1016,7 @@ i915_error_object_create(struct drm_i915_private *i915,
        dma_addr_t dma;
        int ret;
 
-       if (!vma)
+       if (!vma || !vma->pages)
                return NULL;
 
        num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
@@ -1083,23 +1062,23 @@ i915_error_object_create(struct drm_i915_private *i915,
 }
 
 /* The error capture is special as it tries to run underneath the normal
- * locking rules - so we use the raw version of the i915_gem_active lookup.
+ * locking rules - so we use the raw version of the i915_active_request lookup.
  */
-static inline uint32_t
-__active_get_seqno(struct i915_gem_active *active)
+static inline u32
+__active_get_seqno(struct i915_active_request *active)
 {
        struct i915_request *request;
 
-       request = __i915_gem_active_peek(active);
+       request = __i915_active_request_peek(active);
        return request ? request->global_seqno : 0;
 }
 
 static inline int
-__active_get_engine_id(struct i915_gem_active *active)
+__active_get_engine_id(struct i915_active_request *active)
 {
        struct i915_request *request;
 
-       request = __i915_gem_active_peek(active);
+       request = __i915_active_request_peek(active);
        return request ? request->engine->id : -1;
 }
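
The renamed helpers above also show the capture path's null-folding convention: because the peek races the GPU without taking the usual locks, a missing request is folded into a neutral value rather than treated as an error. A stand-alone sketch with hypothetical types in place of the driver's:

    struct toy_request { unsigned int global_seqno; int engine_id; };

    /* 0 is reserved to mean "no request was executing" */
    unsigned int peek_seqno(const struct toy_request *rq)
    {
            return rq ? rq->global_seqno : 0;
    }

    /* -1 mirrors the "no engine" sentinel used elsewhere in the capture */
    int peek_engine_id(const struct toy_request *rq)
    {
            return rq ? rq->engine_id : -1;
    }
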
 
@@ -1127,7 +1106,9 @@ static void capture_bo(struct drm_i915_error_buffer *err,
 
 static u32 capture_error_bo(struct drm_i915_error_buffer *err,
                            int count, struct list_head *head,
-                           bool pinned_only)
+                           unsigned int flags)
+#define ACTIVE_ONLY BIT(0)
+#define PINNED_ONLY BIT(1)
 {
        struct i915_vma *vma;
        int i = 0;
@@ -1136,7 +1117,10 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err,
                if (!vma->obj)
                        continue;
 
-               if (pinned_only && !i915_vma_is_pinned(vma))
+               if (flags & ACTIVE_ONLY && !i915_vma_is_active(vma))
+                       continue;
+
+               if (flags & PINNED_ONLY && !i915_vma_is_pinned(vma))
                        continue;
 
                capture_bo(err++, vma);
@@ -1147,7 +1131,8 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err,
        return i;
 }
 
-/* Generate a semi-unique error code. The code is not meant to have meaning, The
+/*
+ * Generate a semi-unique error code. The code is not meant to have meaning. The
  * code's only purpose is to try to prevent false duplicated bug reports by
  * grossly estimating a GPU error state.
  *
@@ -1156,29 +1141,23 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err,
  *
  * It's only a small step better than a random number in its current form.
  */
-static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
-                                        struct i915_gpu_state *error,
-                                        int *engine_id)
+static u32 i915_error_generate_code(struct i915_gpu_state *error,
+                                   unsigned long engine_mask)
 {
-       uint32_t error_code = 0;
-       int i;
-
-       /* IPEHR would be an ideal way to detect errors, as it's the gross
+       /*
+        * IPEHR would be an ideal way to detect errors, as it's the gross
         * measure of "the command that hung". However, it contains some very
         * common synchronization commands that almost always appear when the
         * hang is strictly a client bug. Use instdone to differentiate those
         * somewhat.
         */
-       for (i = 0; i < I915_NUM_ENGINES; i++) {
-               if (error->engine[i].hangcheck_stalled) {
-                       if (engine_id)
-                               *engine_id = i;
+       if (engine_mask) {
+               struct drm_i915_error_engine *ee =
+                       &error->engine[__ffs(engine_mask)];
 
-                       return error->engine[i].ipehr ^
-                              error->engine[i].instdone.instdone;
-               }
+               return ee->ipehr ^ ee->instdone.instdone;
        }
 
-       return error_code;
+       return 0;
 }
 
 static void gem_record_fences(struct i915_gpu_state *error)
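
The reworked ecode is simply the hung engine's IPEHR xor'd with its INSTDONE snapshot, picked from the first set bit of the engine mask. A self-contained sketch under assumed types (the real layout lives in the error-state structs):

    #include <stdint.h>
    #include <strings.h>                     /* ffs() */

    struct engine_snapshot { uint32_t ipehr, instdone; };

    uint32_t generate_ecode(const struct engine_snapshot *engines,
                            unsigned int mask)
    {
            if (!mask)
                    return 0;
            /* ffs() is 1-based, hence the -1 when indexing engine 0 */
            return engines[ffs(mask) - 1].ipehr ^
                   engines[ffs(mask) - 1].instdone;
    }
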
@@ -1211,59 +1190,6 @@ static void gen6_record_semaphore_state(struct intel_engine_cs *engine,
                        I915_READ(RING_SYNC_2(engine->mmio_base));
 }
 
-static void error_record_engine_waiters(struct intel_engine_cs *engine,
-                                       struct drm_i915_error_engine *ee)
-{
-       struct intel_breadcrumbs *b = &engine->breadcrumbs;
-       struct drm_i915_error_waiter *waiter;
-       struct rb_node *rb;
-       int count;
-
-       ee->num_waiters = 0;
-       ee->waiters = NULL;
-
-       if (RB_EMPTY_ROOT(&b->waiters))
-               return;
-
-       if (!spin_trylock_irq(&b->rb_lock)) {
-               ee->waiters = ERR_PTR(-EDEADLK);
-               return;
-       }
-
-       count = 0;
-       for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
-               count++;
-       spin_unlock_irq(&b->rb_lock);
-
-       waiter = NULL;
-       if (count)
-               waiter = kmalloc_array(count,
-                                      sizeof(struct drm_i915_error_waiter),
-                                      GFP_ATOMIC);
-       if (!waiter)
-               return;
-
-       if (!spin_trylock_irq(&b->rb_lock)) {
-               kfree(waiter);
-               ee->waiters = ERR_PTR(-EDEADLK);
-               return;
-       }
-
-       ee->waiters = waiter;
-       for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
-               struct intel_wait *w = rb_entry(rb, typeof(*w), node);
-
-               strcpy(waiter->comm, w->tsk->comm);
-               waiter->pid = w->tsk->pid;
-               waiter->seqno = w->seqno;
-               waiter++;
-
-               if (++ee->num_waiters == count)
-                       break;
-       }
-       spin_unlock_irq(&b->rb_lock);
-}
-
 static void error_record_engine_registers(struct i915_gpu_state *error,
                                          struct intel_engine_cs *engine,
                                          struct drm_i915_error_engine *ee)
@@ -1299,7 +1225,6 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
 
        intel_engine_get_instdone(engine, &ee->instdone);
 
-       ee->waiting = intel_engine_has_waiter(engine);
        ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
        ee->acthd = intel_engine_get_active_head(engine);
        ee->seqno = intel_engine_get_seqno(engine);
@@ -1314,7 +1239,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
        if (!HWS_NEEDS_PHYSICAL(dev_priv)) {
                i915_reg_t mmio;
 
-               if (IS_GEN7(dev_priv)) {
+               if (IS_GEN(dev_priv, 7)) {
                        switch (engine->id) {
                        default:
                        case RCS:
@@ -1330,7 +1255,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
                                mmio = VEBOX_HWS_PGA_GEN7;
                                break;
                        }
-               } else if (IS_GEN6(engine->i915)) {
+               } else if (IS_GEN(engine->i915, 6)) {
                        mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
                } else {
                        /* XXX: gen8 returns to sanity */
@@ -1341,9 +1266,8 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
        }
 
        ee->idle = intel_engine_is_idle(engine);
-       ee->hangcheck_timestamp = engine->hangcheck.action_timestamp;
-       ee->hangcheck_action = engine->hangcheck.action;
-       ee->hangcheck_stalled = engine->hangcheck.stalled;
+       if (!ee->idle)
+               ee->hangcheck_timestamp = engine->hangcheck.action_timestamp;
        ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error,
                                                  engine);
 
@@ -1352,10 +1276,10 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
 
                ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
 
-               if (IS_GEN6(dev_priv))
+               if (IS_GEN(dev_priv, 6))
                        ee->vm_info.pp_dir_base =
                                I915_READ(RING_PP_DIR_BASE_READ(engine));
-               else if (IS_GEN7(dev_priv))
+               else if (IS_GEN(dev_priv, 7))
                        ee->vm_info.pp_dir_base =
                                I915_READ(RING_PP_DIR_BASE(engine));
                else if (INTEL_GEN(dev_priv) >= 8)
@@ -1374,6 +1298,7 @@ static void record_request(struct i915_request *request,
 {
        struct i915_gem_context *ctx = request->gem_context;
 
+       erq->flags = request->fence.flags;
        erq->context = ctx->hw_id;
        erq->sched_attr = request->sched.attr;
        erq->ban_score = atomic_read(&ctx->ban_score);
@@ -1549,7 +1474,6 @@ static void gem_record_rings(struct i915_gpu_state *error)
                ee->engine_id = i;
 
                error_record_engine_registers(error, engine, ee);
-               error_record_engine_waiters(engine, ee);
                error_record_engine_execlists(engine, ee);
 
                request = i915_gem_find_active_request(engine);
@@ -1613,14 +1537,17 @@ static void gem_capture_vm(struct i915_gpu_state *error,
        int count;
 
        count = 0;
-       list_for_each_entry(vma, &vm->active_list, vm_link)
-               count++;
+       list_for_each_entry(vma, &vm->bound_list, vm_link)
+               if (i915_vma_is_active(vma))
+                       count++;
 
        active_bo = NULL;
        if (count)
                active_bo = kcalloc(count, sizeof(*active_bo), GFP_ATOMIC);
        if (active_bo)
-               count = capture_error_bo(active_bo, count, &vm->active_list, false);
+               count = capture_error_bo(active_bo,
+                                        count, &vm->bound_list,
+                                        ACTIVE_ONLY);
        else
                count = 0;
 
@@ -1658,28 +1585,20 @@ static void capture_pinned_buffers(struct i915_gpu_state *error)
        struct i915_address_space *vm = &error->i915->ggtt.vm;
        struct drm_i915_error_buffer *bo;
        struct i915_vma *vma;
-       int count_inactive, count_active;
-
-       count_inactive = 0;
-       list_for_each_entry(vma, &vm->inactive_list, vm_link)
-               count_inactive++;
+       int count;
 
-       count_active = 0;
-       list_for_each_entry(vma, &vm->active_list, vm_link)
-               count_active++;
+       count = 0;
+       list_for_each_entry(vma, &vm->bound_list, vm_link)
+               count++;
 
        bo = NULL;
-       if (count_inactive + count_active)
-               bo = kcalloc(count_inactive + count_active,
-                            sizeof(*bo), GFP_ATOMIC);
+       if (count)
+               bo = kcalloc(count, sizeof(*bo), GFP_ATOMIC);
        if (!bo)
                return;
 
-       count_inactive = capture_error_bo(bo, count_inactive,
-                                         &vm->active_list, true);
-       count_active = capture_error_bo(bo + count_inactive, count_active,
-                                       &vm->inactive_list, true);
-       error->pinned_bo_count = count_inactive + count_active;
+       error->pinned_bo_count =
+               capture_error_bo(bo, count, &vm->bound_list, PINNED_ONLY);
        error->pinned_bo = bo;
 }
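
Both capture paths now walk the one bound_list and select vmas with flag bits instead of maintaining separate active/inactive lists. A self-contained sketch of that filter, with a toy list node standing in for the driver's i915_vma:

    #include <stdbool.h>

    #define ACTIVE_ONLY (1u << 0)
    #define PINNED_ONLY (1u << 1)

    struct toy_vma {
            bool active, pinned;
            struct toy_vma *next;            /* stand-in for vm->bound_list */
    };

    unsigned int count_matching(const struct toy_vma *head, unsigned int flags)
    {
            unsigned int n = 0;

            for (const struct toy_vma *v = head; v; v = v->next) {
                    if (flags & ACTIVE_ONLY && !v->active)
                            continue;
                    if (flags & PINNED_ONLY && !v->pinned)
                            continue;
                    n++;                     /* capture_bo() records v here */
            }
            return n;
    }
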
 
@@ -1725,7 +1644,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
                error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
        }
 
-       if (IS_GEN7(dev_priv))
+       if (IS_GEN(dev_priv, 7))
                error->err_int = I915_READ(GEN7_ERR_INT);
 
        if (INTEL_GEN(dev_priv) >= 8) {
@@ -1733,7 +1652,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
                error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
        }
 
-       if (IS_GEN6(dev_priv)) {
+       if (IS_GEN(dev_priv, 6)) {
                error->forcewake = I915_READ_FW(FORCEWAKE);
                error->gab_ctl = I915_READ(GAB_CTL);
                error->gfx_mode = I915_READ(GFX_MODE);
@@ -1753,7 +1672,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
                error->ccid = I915_READ(CCID);
 
        /* 3: Feature specific registers */
-       if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
+       if (IS_GEN_RANGE(dev_priv, 6, 7)) {
                error->gam_ecochk = I915_READ(GAM_ECOCHK);
                error->gac_eco = I915_READ(GAC_ECO_BITS);
        }
@@ -1777,7 +1696,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
                error->ier = I915_READ(DEIER);
                error->gtier[0] = I915_READ(GTIER);
                error->ngtier = 1;
-       } else if (IS_GEN2(dev_priv)) {
+       } else if (IS_GEN(dev_priv, 2)) {
                error->ier = I915_READ16(IER);
        } else if (!IS_VALLEYVIEW(dev_priv)) {
                error->ier = I915_READ(IER);
@@ -1786,31 +1705,35 @@ static void capture_reg_state(struct i915_gpu_state *error)
        error->pgtbl_er = I915_READ(PGTBL_ER);
 }
 
-static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
-                                  struct i915_gpu_state *error,
-                                  u32 engine_mask,
-                                  const char *error_msg)
+static const char *
+error_msg(struct i915_gpu_state *error, unsigned long engines, const char *msg)
 {
-       u32 ecode;
-       int engine_id = -1, len;
+       int len;
+       int i;
 
-       ecode = i915_error_generate_code(dev_priv, error, &engine_id);
+       for (i = 0; i < ARRAY_SIZE(error->engine); i++)
+               if (!error->engine[i].context.pid)
+                       engines &= ~BIT(i);
 
        len = scnprintf(error->error_msg, sizeof(error->error_msg),
-                       "GPU HANG: ecode %d:%d:0x%08x",
-                       INTEL_GEN(dev_priv), engine_id, ecode);
-
-       if (engine_id != -1 && error->engine[engine_id].context.pid)
+                       "GPU HANG: ecode %d:%lx:0x%08x",
+                       INTEL_GEN(error->i915), engines,
+                       i915_error_generate_code(error, engines));
+       if (engines) {
+               /* Just show the first executing process; more is confusing */
+               i = __ffs(engines);
                len += scnprintf(error->error_msg + len,
                                 sizeof(error->error_msg) - len,
                                 ", in %s [%d]",
-                                error->engine[engine_id].context.comm,
-                                error->engine[engine_id].context.pid);
+                                error->engine[i].context.comm,
+                                error->engine[i].context.pid);
+       }
+       if (msg)
+               len += scnprintf(error->error_msg + len,
+                                sizeof(error->error_msg) - len,
+                                ", %s", msg);
 
-       scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
-                 ", reason: %s, action: %s",
-                 error_msg,
-                 engine_mask ? "reset" : "continue");
+       return error->error_msg;
 }
 
 static void capture_gen_state(struct i915_gpu_state *error)
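
error_msg() above leans on scnprintf() returning the number of characters actually stored (never more than the space left), which is what makes the buf+len / size-len chaining safe; plain snprintf() returns the would-be length and would break it. A hedged userspace sketch, with a clamp standing in for the kernel helper and size > 0 assumed:

    #include <stdarg.h>
    #include <stdio.h>

    /* userspace stand-in: clamp vsnprintf's would-be length like scnprintf */
    int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
    {
            va_list ap;
            int n;

            va_start(ap, fmt);
            n = vsnprintf(buf, size, fmt, ap);
            va_end(ap);
            return n < 0 ? 0 : ((size_t)n < size ? n : (int)size - 1);
    }

    int build_msg(char *buf, size_t size)
    {
            int len = my_scnprintf(buf, size, "GPU HANG: ecode %d:%lx", 9, 0x3ul);

            len += my_scnprintf(buf + len, size - len, ", in %s [%d]", "app", 42);
            return len;
    }
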
@@ -1831,21 +1754,15 @@ static void capture_gen_state(struct i915_gpu_state *error)
        memcpy(&error->device_info,
               INTEL_INFO(i915),
               sizeof(error->device_info));
+       memcpy(&error->runtime_info,
+              RUNTIME_INFO(i915),
+              sizeof(error->runtime_info));
        error->driver_caps = i915->caps;
 }
 
-static __always_inline void dup_param(const char *type, void *x)
-{
-       if (!__builtin_strcmp(type, "char *"))
-               *(void **)x = kstrdup(*(void **)x, GFP_ATOMIC);
-}
-
 static void capture_params(struct i915_gpu_state *error)
 {
-       error->params = i915_modparams;
-#define DUP(T, x, ...) dup_param(#T, &error->params.x);
-       I915_PARAMS_FOR_EACH(DUP);
-#undef DUP
+       i915_params_copy(&error->params, &i915_modparams);
 }
 
 static unsigned long capture_find_epoch(const struct i915_gpu_state *error)
@@ -1856,7 +1773,7 @@ static unsigned long capture_find_epoch(const struct i915_gpu_state *error)
        for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
                const struct drm_i915_error_engine *ee = &error->engine[i];
 
-               if (ee->hangcheck_stalled &&
+               if (ee->hangcheck_timestamp &&
                    time_before(ee->hangcheck_timestamp, epoch))
                        epoch = ee->hangcheck_timestamp;
        }
@@ -1907,9 +1824,16 @@ i915_capture_gpu_state(struct drm_i915_private *i915)
 {
        struct i915_gpu_state *error;
 
+       /* Check if GPU capture has been disabled */
+       error = READ_ONCE(i915->gpu_error.first_error);
+       if (IS_ERR(error))
+               return error;
+
        error = kzalloc(sizeof(*error), GFP_ATOMIC);
-       if (!error)
-               return NULL;
+       if (!error) {
+               i915_disable_error_state(i915, -ENOMEM);
+               return ERR_PTR(-ENOMEM);
+       }
 
        kref_init(&error->ref);
        error->i915 = i915;
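
first_error now triples as a sentinel: NULL (nothing captured), a real capture, or an ERR_PTR() recording why capture is disabled, with -ENODEV treated as sticky in i915_reset_error_state() below. For readers unfamiliar with the encoding, a sketch modelled on (but not copied from) the kernel's err.h:

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }

    static inline int IS_ERR(const void *ptr)
    {
            /* errnos occupy the top MAX_ERRNO values of the address space */
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static inline int IS_ERR_OR_NULL(const void *ptr)
    {
            return !ptr || IS_ERR(ptr);
    }

With this encoding the READ_ONCE() above distinguishes "capture disabled" from "no capture yet" with a single pointer load, no extra flag or lock required.
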
@@ -1923,7 +1847,7 @@ i915_capture_gpu_state(struct drm_i915_private *i915)
  * i915_capture_error_state - capture an error record for later analysis
  * @i915: i915 device
  * @engine_mask: the mask of engines triggering the hang
- * @error_msg: a message to insert into the error capture header
+ * @msg: a message to insert into the error capture header
  *
  * Should be called when an error is detected (either a hang or an error
  * interrupt) to capture error state from the time of the error.  Fills
@@ -1931,8 +1855,8 @@ i915_capture_gpu_state(struct drm_i915_private *i915)
  * to pick up.
  */
 void i915_capture_error_state(struct drm_i915_private *i915,
-                             u32 engine_mask,
-                             const char *error_msg)
+                             unsigned long engine_mask,
+                             const char *msg)
 {
        static bool warned;
        struct i915_gpu_state *error;
@@ -1945,14 +1869,10 @@ void i915_capture_error_state(struct drm_i915_private *i915,
                return;
 
        error = i915_capture_gpu_state(i915);
-       if (!error) {
-               DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
-               i915_disable_error_state(i915, -ENOMEM);
+       if (IS_ERR(error))
                return;
-       }
 
-       i915_error_capture_msg(i915, error, engine_mask, error_msg);
-       DRM_INFO("%s\n", error->error_msg);
+       dev_info(i915->drm.dev, "%s\n", error_msg(error, engine_mask, msg));
 
        if (!error->simulated) {
                spin_lock_irqsave(&i915->gpu_error.lock, flags);
@@ -1987,7 +1907,7 @@ i915_first_error_state(struct drm_i915_private *i915)
 
        spin_lock_irq(&i915->gpu_error.lock);
        error = i915->gpu_error.first_error;
-       if (error)
+       if (!IS_ERR_OR_NULL(error))
                i915_gpu_state_get(error);
        spin_unlock_irq(&i915->gpu_error.lock);
 
@@ -2000,10 +1920,11 @@ void i915_reset_error_state(struct drm_i915_private *i915)
 
        spin_lock_irq(&i915->gpu_error.lock);
        error = i915->gpu_error.first_error;
-       i915->gpu_error.first_error = NULL;
+       if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */
+               i915->gpu_error.first_error = NULL;
        spin_unlock_irq(&i915->gpu_error.lock);
 
-       if (!IS_ERR(error))
+       if (!IS_ERR_OR_NULL(error))
                i915_gpu_state_put(error);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index ff2652bbb0b08bb22cab8d32f338c1bdf8643e81..53b1f22dd365689c048df41fcc1ea8a4c148fa06 100644 (file)
@@ -45,6 +45,7 @@ struct i915_gpu_state {
        u32 reset_count;
        u32 suspend_count;
        struct intel_device_info device_info;
+       struct intel_runtime_info runtime_info;
        struct intel_driver_caps driver_caps;
        struct i915_params params;
 
@@ -81,11 +82,7 @@ struct i915_gpu_state {
                int engine_id;
                /* Software tracked state */
                bool idle;
-               bool waiting;
-               int num_waiters;
                unsigned long hangcheck_timestamp;
-               bool hangcheck_stalled;
-               enum intel_engine_hangcheck_action hangcheck_action;
                struct i915_address_space *vm;
                int num_requests;
                u32 reset_count;
@@ -148,6 +145,7 @@ struct i915_gpu_state {
                struct drm_i915_error_object *default_state;
 
                struct drm_i915_error_request {
+                       unsigned long flags;
                        long jiffies;
                        pid_t pid;
                        u32 context;
@@ -160,12 +158,6 @@ struct i915_gpu_state {
                } *requests, execlist[EXECLIST_MAX_PORTS];
                unsigned int num_ports;
 
-               struct drm_i915_error_waiter {
-                       char comm[TASK_COMM_LEN];
-                       pid_t pid;
-                       u32 seqno;
-               } *waiters;
-
                struct {
                        u32 gfx_mode;
                        union {
@@ -196,6 +188,8 @@ struct i915_gpu_state {
        struct scatterlist *sgl, *fit;
 };
 
+struct i915_gpu_restart;
+
 struct i915_gpu_error {
        /* For hangcheck timer */
 #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
@@ -210,8 +204,6 @@ struct i915_gpu_error {
 
        atomic_t pending_fb_pin;
 
-       unsigned long missed_irq_rings;
-
        /**
         * State variable controlling the reset flow and count
         *
@@ -246,15 +238,6 @@ struct i915_gpu_error {
         * i915_mutex_lock_interruptible()?). I915_RESET_BACKOFF serves a
         * secondary role in preventing two concurrent global reset attempts.
         *
-        * #I915_RESET_HANDOFF - To perform the actual GPU reset, we need the
-        * struct_mutex. We try to acquire the struct_mutex in the reset worker,
-        * but it may be held by some long running waiter (that we cannot
-        * interrupt without causing trouble). Once we are ready to do the GPU
-        * reset, we set the I915_RESET_HANDOFF bit and wakeup any waiters. If
-        * they already hold the struct_mutex and want to participate they can
-        * inspect the bit and do the reset directly, otherwise the worker
-        * waits for the struct_mutex.
-        *
         * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
         * acquire the struct_mutex to reset an engine, we need an explicit
         * flag to prevent two concurrent reset attempts in the same engine.
@@ -268,19 +251,14 @@ struct i915_gpu_error {
         */
        unsigned long flags;
 #define I915_RESET_BACKOFF     0
-#define I915_RESET_HANDOFF     1
-#define I915_RESET_MODESET     2
+#define I915_RESET_MODESET     1
+#define I915_RESET_ENGINE      2
 #define I915_WEDGED            (BITS_PER_LONG - 1)
-#define I915_RESET_ENGINE      (I915_WEDGED - I915_NUM_ENGINES)
 
        /** Number of times an engine has been reset */
        u32 reset_engine_count[I915_NUM_ENGINES];
 
-       /** Set of stalled engines with guilty requests, in the current reset */
-       u32 stalled_mask;
-
-       /** Reason for the current *global* reset */
-       const char *reason;
+       struct mutex wedge_mutex; /* serialises wedging/unwedging */
 
        /**
         * Waitqueue to signal when a hang is detected. Used for waiters
@@ -294,8 +272,7 @@ struct i915_gpu_error {
         */
        wait_queue_head_t reset_queue;
 
-       /* For missed irq/seqno simulation. */
-       unsigned long test_irq_rings;
+       struct i915_gpu_restart *restart;
 };
 
 struct drm_i915_error_state_buf {
@@ -317,7 +294,7 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
 
 struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
 void i915_capture_error_state(struct drm_i915_private *dev_priv,
-                             u32 engine_mask,
+                             unsigned long engine_mask,
                              const char *error_msg);
 
 static inline struct i915_gpu_state *
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index e869daf9c8a9e0c21f409506ab60240f76bd95c7..c1007245f46dbbcabf671d107f4fa6e0df028b57 100644 (file)
@@ -28,8 +28,8 @@
  */
 #include <linux/compat.h>
 
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
+#include <drm/drm_ioctl.h>
 #include "i915_drv.h"
 
 struct drm_i915_getparam32 {
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index d447d7d508f483c62baecad23035a60702fd6a3c..441d2674b2725227116b5cd852779e86920f5fa0 100644 (file)
@@ -31,7 +31,8 @@
 #include <linux/sysrq.h>
 #include <linux/slab.h>
 #include <linux/circ_buf.h>
-#include <drm/drmP.h>
+#include <drm/drm_irq.h>
+#include <drm/drm_drv.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 #include "i915_trace.h"
@@ -224,10 +225,10 @@ static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
 /* For display hotplug interrupt */
 static inline void
 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
-                                    uint32_t mask,
-                                    uint32_t bits)
+                                    u32 mask,
+                                    u32 bits)
 {
-       uint32_t val;
+       u32 val;
 
        lockdep_assert_held(&dev_priv->irq_lock);
        WARN_ON(bits & ~mask);
@@ -251,8 +252,8 @@ i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
  * version is also available.
  */
 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
-                                  uint32_t mask,
-                                  uint32_t bits)
+                                  u32 mask,
+                                  u32 bits)
 {
        spin_lock_irq(&dev_priv->irq_lock);
        i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
@@ -301,10 +302,10 @@ static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
  * @enabled_irq_mask: mask of interrupt bits to enable
  */
 void ilk_update_display_irq(struct drm_i915_private *dev_priv,
-                           uint32_t interrupt_mask,
-                           uint32_t enabled_irq_mask)
+                           u32 interrupt_mask,
+                           u32 enabled_irq_mask)
 {
-       uint32_t new_val;
+       u32 new_val;
 
        lockdep_assert_held(&dev_priv->irq_lock);
 
@@ -331,8 +332,8 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv,
  * @enabled_irq_mask: mask of interrupt bits to enable
  */
 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
-                             uint32_t interrupt_mask,
-                             uint32_t enabled_irq_mask)
+                             u32 interrupt_mask,
+                             u32 enabled_irq_mask)
 {
        lockdep_assert_held(&dev_priv->irq_lock);
 
@@ -346,13 +347,13 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 }
 
-void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
 {
        ilk_update_gt_irq(dev_priv, mask, mask);
        POSTING_READ_FW(GTIMR);
 }
 
-void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
 {
        ilk_update_gt_irq(dev_priv, mask, 0);
 }
@@ -391,10 +392,10 @@ static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
  * @enabled_irq_mask: mask of interrupt bits to enable
  */
 static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
-                             uint32_t interrupt_mask,
-                             uint32_t enabled_irq_mask)
+                             u32 interrupt_mask,
+                             u32 enabled_irq_mask)
 {
-       uint32_t new_val;
+       u32 new_val;
 
        WARN_ON(enabled_irq_mask & ~interrupt_mask);
 
@@ -578,11 +579,11 @@ void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
  * @enabled_irq_mask: mask of interrupt bits to enable
  */
 static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
-                               uint32_t interrupt_mask,
-                               uint32_t enabled_irq_mask)
+                               u32 interrupt_mask,
+                               u32 enabled_irq_mask)
 {
-       uint32_t new_val;
-       uint32_t old_val;
+       u32 new_val;
+       u32 old_val;
 
        lockdep_assert_held(&dev_priv->irq_lock);
 
@@ -612,10 +613,10 @@ static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
  */
 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
                         enum pipe pipe,
-                        uint32_t interrupt_mask,
-                        uint32_t enabled_irq_mask)
+                        u32 interrupt_mask,
+                        u32 enabled_irq_mask)
 {
-       uint32_t new_val;
+       u32 new_val;
 
        lockdep_assert_held(&dev_priv->irq_lock);
 
@@ -642,10 +643,10 @@ void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
  * @enabled_irq_mask: mask of interrupt bits to enable
  */
 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
-                                 uint32_t interrupt_mask,
-                                 uint32_t enabled_irq_mask)
+                                 u32 interrupt_mask,
+                                 u32 enabled_irq_mask)
 {
-       uint32_t sdeimr = I915_READ(SDEIMR);
+       u32 sdeimr = I915_READ(SDEIMR);
        sdeimr &= ~interrupt_mask;
        sdeimr |= (~enabled_irq_mask & interrupt_mask);
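
Those two lines are the driver's recurring masked-update idiom (ilk_update_display_irq(), snb_update_pm_irq() and bdw_update_port_irq() above use the same shape): only the bits named in interrupt_mask change, and within that subset a set bit in enabled_irq_mask clears the mask-register bit, since IMR semantics are 1 = masked. As plain bit arithmetic:

    #include <stdint.h>

    uint32_t update_imr(uint32_t imr, uint32_t owned, uint32_t enabled)
    {
            imr &= ~owned;               /* forget the bits this call owns */
            imr |= ~enabled & owned;     /* re-mask the ones not enabled */
            return imr;                  /* bits outside 'owned' untouched */
    }
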
 
@@ -822,11 +823,26 @@ static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+       const struct drm_display_mode *mode = &vblank->hwmode;
        i915_reg_t high_frame, low_frame;
        u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
-       const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode;
        unsigned long irqflags;
 
+       /*
+        * On i965gm TV output the frame counter only works up to
+        * the point when we enable the TV encoder. After that the
+        * frame counter ceases to work and reads zero. We need a
+        * vblank wait before enabling the TV encoder and so we
+        * have to enable vblank interrupts while the frame counter
+        * is still in a working state. However the core vblank code
+        * does not like us returning non-zero frame counter values
+        * when we've told it that we don't have a working frame
+        * counter. Thus we must stop non-zero values leaking out.
+        */
+       if (!vblank->max_vblank_count)
+               return 0;
+
        htotal = mode->crtc_htotal;
        hsync_start = mode->crtc_hsync_start;
        vbl_start = mode->crtc_vblank_start;
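
The early return above pairs with the intel_irq_init() change at the end of this patch: gen2 (and now the i965gm TV-out case) leave max_vblank_count at 0 to tell the DRM core there is no usable hardware frame counter, so the counter callback must report 0 as well rather than leak real readings. A sketch of that contract with a hypothetical crtc type:

    struct toy_crtc { unsigned int max_vblank_count, hw_frame_counter; };

    unsigned int get_vblank_counter(const struct toy_crtc *crtc)
    {
            /* no hw counter advertised: never let non-zero values leak out */
            if (!crtc->max_vblank_count)
                    return 0;
            return crtc->hw_frame_counter;
    }
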
@@ -950,7 +966,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                vtotal /= 2;
 
-       if (IS_GEN2(dev_priv))
+       if (IS_GEN(dev_priv, 2))
                position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
        else
                position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
@@ -998,6 +1014,9 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
        int position;
        int vbl_start, vbl_end, hsync_start, htotal, vtotal;
        unsigned long irqflags;
+       bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
+               IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
+               mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
 
        if (WARN_ON(!mode->crtc_clock)) {
                DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
@@ -1030,7 +1049,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
        if (stime)
                *stime = ktime_get();
 
-       if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
+       if (use_scanline_counter) {
                /* No obvious pixelcount register. Only query vertical
                 * scanout position from Display scan line register.
                 */
@@ -1090,7 +1109,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
        else
                position += vtotal - vbl_end;
 
-       if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
+       if (use_scanline_counter) {
                *vpos = position;
                *hpos = 0;
        } else {
@@ -1152,76 +1171,6 @@ static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
        return;
 }
 
-static void notify_ring(struct intel_engine_cs *engine)
-{
-       const u32 seqno = intel_engine_get_seqno(engine);
-       struct i915_request *rq = NULL;
-       struct task_struct *tsk = NULL;
-       struct intel_wait *wait;
-
-       if (unlikely(!engine->breadcrumbs.irq_armed))
-               return;
-
-       rcu_read_lock();
-
-       spin_lock(&engine->breadcrumbs.irq_lock);
-       wait = engine->breadcrumbs.irq_wait;
-       if (wait) {
-               /*
-                * We use a callback from the dma-fence to submit
-                * requests after waiting on our own requests. To
-                * ensure minimum delay in queuing the next request to
-                * hardware, signal the fence now rather than wait for
-                * the signaler to be woken up. We still wake up the
-                * waiter in order to handle the irq-seqno coherency
-                * issues (we may receive the interrupt before the
-                * seqno is written, see __i915_request_irq_complete())
-                * and to handle coalescing of multiple seqno updates
-                * and many waiters.
-                */
-               if (i915_seqno_passed(seqno, wait->seqno)) {
-                       struct i915_request *waiter = wait->request;
-
-                       if (waiter &&
-                           !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
-                                     &waiter->fence.flags) &&
-                           intel_wait_check_request(wait, waiter))
-                               rq = i915_request_get(waiter);
-
-                       tsk = wait->tsk;
-               } else {
-                       if (engine->irq_seqno_barrier &&
-                           i915_seqno_passed(seqno, wait->seqno - 1)) {
-                               set_bit(ENGINE_IRQ_BREADCRUMB,
-                                       &engine->irq_posted);
-                               tsk = wait->tsk;
-                       }
-               }
-
-               engine->breadcrumbs.irq_count++;
-       } else {
-               if (engine->breadcrumbs.irq_armed)
-                       __intel_engine_disarm_breadcrumbs(engine);
-       }
-       spin_unlock(&engine->breadcrumbs.irq_lock);
-
-       if (rq) {
-               spin_lock(&rq->lock);
-               dma_fence_signal_locked(&rq->fence);
-               GEM_BUG_ON(!i915_request_completed(rq));
-               spin_unlock(&rq->lock);
-
-               i915_request_put(rq);
-       }
-
-       if (tsk && tsk->state & TASK_NORMAL)
-               wake_up_process(tsk);
-
-       rcu_read_unlock();
-
-       trace_intel_engine_notify(engine, wait);
-}
-
 static void vlv_c0_read(struct drm_i915_private *dev_priv,
                        struct intel_rps_ei *ei)
 {
@@ -1376,8 +1325,8 @@ static void ivybridge_parity_work(struct work_struct *work)
                container_of(work, typeof(*dev_priv), l3_parity.error_work);
        u32 error_status, row, bank, subbank;
        char *parity_event[6];
-       uint32_t misccpctl;
-       uint8_t slice = 0;
+       u32 misccpctl;
+       u8 slice = 0;
 
        /* We must turn off DOP level clock gating to access the L3 registers.
         * In order to prevent a get/put style interface, acquire struct mutex
@@ -1466,20 +1415,20 @@ static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
                               u32 gt_iir)
 {
        if (gt_iir & GT_RENDER_USER_INTERRUPT)
-               notify_ring(dev_priv->engine[RCS]);
+               intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
        if (gt_iir & ILK_BSD_USER_INTERRUPT)
-               notify_ring(dev_priv->engine[VCS]);
+               intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]);
 }
 
 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
                               u32 gt_iir)
 {
        if (gt_iir & GT_RENDER_USER_INTERRUPT)
-               notify_ring(dev_priv->engine[RCS]);
+               intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
        if (gt_iir & GT_BSD_USER_INTERRUPT)
-               notify_ring(dev_priv->engine[VCS]);
+               intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]);
        if (gt_iir & GT_BLT_USER_INTERRUPT)
-               notify_ring(dev_priv->engine[BCS]);
+               intel_engine_breadcrumbs_irq(dev_priv->engine[BCS]);
 
        if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
                      GT_BSD_CS_ERROR_INTERRUPT |
@@ -1499,7 +1448,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
                tasklet = true;
 
        if (iir & GT_RENDER_USER_INTERRUPT) {
-               notify_ring(engine);
+               intel_engine_breadcrumbs_irq(engine);
                tasklet |= USES_GUC_SUBMISSION(engine->i915);
        }
 
@@ -1738,13 +1687,13 @@ static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
 #if defined(CONFIG_DEBUG_FS)
 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
                                         enum pipe pipe,
-                                        uint32_t crc0, uint32_t crc1,
-                                        uint32_t crc2, uint32_t crc3,
-                                        uint32_t crc4)
+                                        u32 crc0, u32 crc1,
+                                        u32 crc2, u32 crc3,
+                                        u32 crc4)
 {
        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
        struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
-       uint32_t crcs[5];
+       u32 crcs[5];
 
        spin_lock(&pipe_crc->lock);
        /*
@@ -1776,9 +1725,9 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
 static inline void
 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
                             enum pipe pipe,
-                            uint32_t crc0, uint32_t crc1,
-                            uint32_t crc2, uint32_t crc3,
-                            uint32_t crc4) {}
+                            u32 crc0, u32 crc1,
+                            u32 crc2, u32 crc3,
+                            u32 crc4) {}
 #endif
 
 
@@ -1804,7 +1753,7 @@ static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
                                      enum pipe pipe)
 {
-       uint32_t res1, res2;
+       u32 res1, res2;
 
        if (INTEL_GEN(dev_priv) >= 3)
                res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
@@ -1845,7 +1794,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 
        if (HAS_VEBOX(dev_priv)) {
                if (pm_iir & PM_VEBOX_USER_INTERRUPT)
-                       notify_ring(dev_priv->engine[VECS]);
+                       intel_engine_breadcrumbs_irq(dev_priv->engine[VECS]);
 
                if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
                        DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
@@ -2547,7 +2496,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
                I915_WRITE(SDEIIR, pch_iir);
        }
 
-       if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
+       if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
                ironlake_rps_change_irq_handler(dev_priv);
 }
 
@@ -2938,46 +2887,6 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
-struct wedge_me {
-       struct delayed_work work;
-       struct drm_i915_private *i915;
-       const char *name;
-};
-
-static void wedge_me(struct work_struct *work)
-{
-       struct wedge_me *w = container_of(work, typeof(*w), work.work);
-
-       dev_err(w->i915->drm.dev,
-               "%s timed out, cancelling all in-flight rendering.\n",
-               w->name);
-       i915_gem_set_wedged(w->i915);
-}
-
-static void __init_wedge(struct wedge_me *w,
-                        struct drm_i915_private *i915,
-                        long timeout,
-                        const char *name)
-{
-       w->i915 = i915;
-       w->name = name;
-
-       INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
-       schedule_delayed_work(&w->work, timeout);
-}
-
-static void __fini_wedge(struct wedge_me *w)
-{
-       cancel_delayed_work_sync(&w->work);
-       destroy_delayed_work_on_stack(&w->work);
-       w->i915 = NULL;
-}
-
-#define i915_wedge_on_timeout(W, DEV, TIMEOUT)                         \
-       for (__init_wedge((W), (DEV), (TIMEOUT), __func__);             \
-            (W)->i915;                                                 \
-            __fini_wedge((W)))
-
 static u32
 gen11_gt_engine_identity(struct drm_i915_private * const i915,
                         const unsigned int bank, const unsigned int bit)
@@ -3188,203 +3097,6 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
-static void i915_reset_device(struct drm_i915_private *dev_priv,
-                             u32 engine_mask,
-                             const char *reason)
-{
-       struct i915_gpu_error *error = &dev_priv->gpu_error;
-       struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
-       char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
-       char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
-       char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
-       struct wedge_me w;
-
-       kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
-
-       DRM_DEBUG_DRIVER("resetting chip\n");
-       kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
-
-       /* Use a watchdog to ensure that our reset completes */
-       i915_wedge_on_timeout(&w, dev_priv, 5*HZ) {
-               intel_prepare_reset(dev_priv);
-
-               error->reason = reason;
-               error->stalled_mask = engine_mask;
-
-               /* Signal that locked waiters should reset the GPU */
-               smp_mb__before_atomic();
-               set_bit(I915_RESET_HANDOFF, &error->flags);
-               wake_up_all(&error->wait_queue);
-
-               /* Wait for anyone holding the lock to wakeup, without
-                * blocking indefinitely on struct_mutex.
-                */
-               do {
-                       if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
-                               i915_reset(dev_priv, engine_mask, reason);
-                               mutex_unlock(&dev_priv->drm.struct_mutex);
-                       }
-               } while (wait_on_bit_timeout(&error->flags,
-                                            I915_RESET_HANDOFF,
-                                            TASK_UNINTERRUPTIBLE,
-                                            1));
-
-               error->stalled_mask = 0;
-               error->reason = NULL;
-
-               intel_finish_reset(dev_priv);
-       }
-
-       if (!test_bit(I915_WEDGED, &error->flags))
-               kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
-}
-
-void i915_clear_error_registers(struct drm_i915_private *dev_priv)
-{
-       u32 eir;
-
-       if (!IS_GEN2(dev_priv))
-               I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
-
-       if (INTEL_GEN(dev_priv) < 4)
-               I915_WRITE(IPEIR, I915_READ(IPEIR));
-       else
-               I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));
-
-       I915_WRITE(EIR, I915_READ(EIR));
-       eir = I915_READ(EIR);
-       if (eir) {
-               /*
-                * some errors might have become stuck,
-                * mask them.
-                */
-               DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
-               I915_WRITE(EMR, I915_READ(EMR) | eir);
-               I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT);
-       }
-
-       if (INTEL_GEN(dev_priv) >= 8) {
-               I915_WRITE(GEN8_RING_FAULT_REG,
-                          I915_READ(GEN8_RING_FAULT_REG) & ~RING_FAULT_VALID);
-               POSTING_READ(GEN8_RING_FAULT_REG);
-       } else if (INTEL_GEN(dev_priv) >= 6) {
-               struct intel_engine_cs *engine;
-               enum intel_engine_id id;
-
-               for_each_engine(engine, dev_priv, id) {
-                       I915_WRITE(RING_FAULT_REG(engine),
-                                  I915_READ(RING_FAULT_REG(engine)) &
-                                  ~RING_FAULT_VALID);
-               }
-               POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
-       }
-}
-
-/**
- * i915_handle_error - handle a gpu error
- * @dev_priv: i915 device private
- * @engine_mask: mask representing engines that are hung
- * @flags: control flags
- * @fmt: Error message format string
- *
- * Do some basic checking of register state at error time and
- * dump it to the syslog.  Also call i915_capture_error_state() to make
- * sure we get a record and make it available in debugfs.  Fire a uevent
- * so userspace knows something bad happened (should trigger collection
- * of a ring dump etc.).
- */
-void i915_handle_error(struct drm_i915_private *dev_priv,
-                      u32 engine_mask,
-                      unsigned long flags,
-                      const char *fmt, ...)
-{
-       struct intel_engine_cs *engine;
-       unsigned int tmp;
-       char error_msg[80];
-       char *msg = NULL;
-
-       if (fmt) {
-               va_list args;
-
-               va_start(args, fmt);
-               vscnprintf(error_msg, sizeof(error_msg), fmt, args);
-               va_end(args);
-
-               msg = error_msg;
-       }
-
-       /*
-        * In most cases it's guaranteed that we get here with an RPM
-        * reference held, for example because there is a pending GPU
-        * request that won't finish until the reset is done. This
-        * isn't the case at least when we get here by doing a
-        * simulated reset via debugfs, so get an RPM reference.
-        */
-       intel_runtime_pm_get(dev_priv);
-
-       engine_mask &= INTEL_INFO(dev_priv)->ring_mask;
-
-       if (flags & I915_ERROR_CAPTURE) {
-               i915_capture_error_state(dev_priv, engine_mask, msg);
-               i915_clear_error_registers(dev_priv);
-       }
-
-       /*
-        * Try engine reset when available. We fall back to full reset if
-        * single reset fails.
-        */
-       if (intel_has_reset_engine(dev_priv) &&
-           !i915_terminally_wedged(&dev_priv->gpu_error)) {
-               for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
-                       BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
-                       if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
-                                            &dev_priv->gpu_error.flags))
-                               continue;
-
-                       if (i915_reset_engine(engine, msg) == 0)
-                               engine_mask &= ~intel_engine_flag(engine);
-
-                       clear_bit(I915_RESET_ENGINE + engine->id,
-                                 &dev_priv->gpu_error.flags);
-                       wake_up_bit(&dev_priv->gpu_error.flags,
-                                   I915_RESET_ENGINE + engine->id);
-               }
-       }
-
-       if (!engine_mask)
-               goto out;
-
-       /* Full reset needs the mutex, stop any other user trying to do so. */
-       if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) {
-               wait_event(dev_priv->gpu_error.reset_queue,
-                          !test_bit(I915_RESET_BACKOFF,
-                                    &dev_priv->gpu_error.flags));
-               goto out;
-       }
-
-       /* Prevent any other reset-engine attempt. */
-       for_each_engine(engine, dev_priv, tmp) {
-               while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
-                                       &dev_priv->gpu_error.flags))
-                       wait_on_bit(&dev_priv->gpu_error.flags,
-                                   I915_RESET_ENGINE + engine->id,
-                                   TASK_UNINTERRUPTIBLE);
-       }
-
-       i915_reset_device(dev_priv, engine_mask, msg);
-
-       for_each_engine(engine, dev_priv, tmp) {
-               clear_bit(I915_RESET_ENGINE + engine->id,
-                         &dev_priv->gpu_error.flags);
-       }
-
-       clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags);
-       wake_up_all(&dev_priv->gpu_error.reset_queue);
-
-out:
-       intel_runtime_pm_put(dev_priv);
-}
-
 /* Called from drm generic code, passed 'crtc' which
  * we use as a pipe index
  */
@@ -3417,7 +3129,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
        unsigned long irqflags;
-       uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
+       u32 bit = INTEL_GEN(dev_priv) >= 7 ?
                DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
 
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -3479,7 +3191,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
        unsigned long irqflags;
-       uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
+       u32 bit = INTEL_GEN(dev_priv) >= 7 ?
                DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
 
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -3586,11 +3298,8 @@ static void ironlake_irq_reset(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
 
-       if (IS_GEN5(dev_priv))
-               I915_WRITE(HWSTAM, 0xffffffff);
-
        GEN3_IRQ_RESET(DE);
-       if (IS_GEN7(dev_priv))
+       if (IS_GEN(dev_priv, 7))
                I915_WRITE(GEN7_ERR_INT, 0xffffffff);
 
        if (IS_HASWELL(dev_priv)) {
@@ -3700,7 +3409,7 @@ static void gen11_irq_reset(struct drm_device *dev)
 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
                                     u8 pipe_mask)
 {
-       uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
+       u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
        enum pipe pipe;
 
        spin_lock_irq(&dev_priv->irq_lock);
@@ -4045,7 +3754,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
        }
 
        gt_irqs |= GT_RENDER_USER_INTERRUPT;
-       if (IS_GEN5(dev_priv)) {
+       if (IS_GEN(dev_priv, 5)) {
                gt_irqs |= ILK_BSD_USER_INTERRUPT;
        } else {
                gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
@@ -4169,7 +3878,7 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
 {
        /* These are interrupts we'll toggle with the ring mask register */
-       uint32_t gt_interrupts[] = {
+       u32 gt_interrupts[] = {
                GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
                        GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
                        GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
@@ -4183,9 +3892,6 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
                        GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
                };
 
-       if (HAS_L3_DPF(dev_priv))
-               gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-
        dev_priv->pm_ier = 0x0;
        dev_priv->pm_imr = ~dev_priv->pm_ier;
        GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
@@ -4200,8 +3906,8 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
 
 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 {
-       uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
-       uint32_t de_pipe_enables;
+       u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
+       u32 de_pipe_enables;
        u32 de_port_masked = GEN8_AUX_CHANNEL_A;
        u32 de_port_enables;
        u32 de_misc_masked = GEN8_DE_EDP_PSR;
@@ -4341,6 +4047,7 @@ static int gen11_irq_postinstall(struct drm_device *dev)
        I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
 
        gen11_master_intr_enable(dev_priv->regs);
+       POSTING_READ(GEN11_GFX_MSTR_IRQ);
 
        return 0;
 }
@@ -4368,8 +4075,6 @@ static void i8xx_irq_reset(struct drm_device *dev)
 
        i9xx_pipestat_irq_reset(dev_priv);
 
-       I915_WRITE16(HWSTAM, 0xffff);
-
        GEN2_IRQ_RESET();
 }
 
@@ -4513,7 +4218,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
                I915_WRITE16(IIR, iir);
 
                if (iir & I915_USER_INTERRUPT)
-                       notify_ring(dev_priv->engine[RCS]);
+                       intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
 
                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4537,8 +4242,6 @@ static void i915_irq_reset(struct drm_device *dev)
 
        i9xx_pipestat_irq_reset(dev_priv);
 
-       I915_WRITE(HWSTAM, 0xffffffff);
-
        GEN3_IRQ_RESET();
 }
 
@@ -4623,7 +4326,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
                I915_WRITE(IIR, iir);
 
                if (iir & I915_USER_INTERRUPT)
-                       notify_ring(dev_priv->engine[RCS]);
+                       intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
 
                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4648,8 +4351,6 @@ static void i965_irq_reset(struct drm_device *dev)
 
        i9xx_pipestat_irq_reset(dev_priv);
 
-       I915_WRITE(HWSTAM, 0xffffffff);
-
        GEN3_IRQ_RESET();
 }
 
@@ -4770,10 +4471,10 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
                I915_WRITE(IIR, iir);
 
                if (iir & I915_USER_INTERRUPT)
-                       notify_ring(dev_priv->engine[RCS]);
+                       intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
 
                if (iir & I915_BSD_USER_INTERRUPT)
-                       notify_ring(dev_priv->engine[VCS]);
+                       intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]);
 
                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4836,23 +4537,17 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
        if (INTEL_GEN(dev_priv) >= 8)
                rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
 
-       if (IS_GEN2(dev_priv)) {
-               /* Gen2 doesn't have a hardware frame counter */
-               dev->max_vblank_count = 0;
-       } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
-               dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
+       if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
                dev->driver->get_vblank_counter = g4x_get_vblank_counter;
-       } else {
+       else if (INTEL_GEN(dev_priv) >= 3)
                dev->driver->get_vblank_counter = i915_get_vblank_counter;
-               dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
-       }
 
        /*
         * Opt out of the vblank disable timer on everything except gen2.
         * Gen2 doesn't have a hardware frame counter and so depends on
         * vblank interrupts to produce sane vblank sequence numbers.
         */
-       if (!IS_GEN2(dev_priv))
+       if (!IS_GEN(dev_priv, 2))
                dev->vblank_disable_immediate = true;
 
        /* Most platforms treat the display irq block as an always-on
@@ -4924,14 +4619,14 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
                dev->driver->disable_vblank = ironlake_disable_vblank;
                dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
        } else {
-               if (IS_GEN2(dev_priv)) {
+               if (IS_GEN(dev_priv, 2)) {
                        dev->driver->irq_preinstall = i8xx_irq_reset;
                        dev->driver->irq_postinstall = i8xx_irq_postinstall;
                        dev->driver->irq_handler = i8xx_irq_handler;
                        dev->driver->irq_uninstall = i8xx_irq_reset;
                        dev->driver->enable_vblank = i8xx_enable_vblank;
                        dev->driver->disable_vblank = i8xx_disable_vblank;
-               } else if (IS_GEN3(dev_priv)) {
+               } else if (IS_GEN(dev_priv, 3)) {
                        dev->driver->irq_preinstall = i915_irq_reset;
                        dev->driver->irq_postinstall = i915_irq_postinstall;
                        dev->driver->irq_uninstall = i915_irq_reset;
index 2e0356561839d15016d72394d7960388545fb223..b5be0abbba359e4372c458c57597bb67e574a172 100644
@@ -77,7 +77,7 @@ i915_param_named(error_capture, bool, 0600,
        "triaging and debugging hangs.");
 #endif
 
-i915_param_named_unsafe(enable_hangcheck, bool, 0644,
+i915_param_named_unsafe(enable_hangcheck, bool, 0600,
        "Periodically check GPU activity for detecting hangs. "
        "WARNING: Disabling this can cause system wide hangs. "
        "(default: true)");
@@ -97,8 +97,10 @@ i915_param_named_unsafe(disable_power_well, int, 0400,
 
 i915_param_named_unsafe(enable_ips, int, 0600, "Enable IPS (default: true)");
 
-i915_param_named(fastboot, bool, 0600,
-       "Try to skip unnecessary mode sets at boot time (default: false)");
+i915_param_named(fastboot, int, 0600,
+       "Try to skip unnecessary mode sets at boot time "
+       "(0=disabled, 1=enabled) "
+       "Default: -1 (use per-chip default)");
 
 i915_param_named_unsafe(prefault_disable, bool, 0600,
        "Disable page prefaulting for pread/pwrite/reloc (default:false). "
@@ -203,3 +205,33 @@ void i915_params_dump(const struct i915_params *params, struct drm_printer *p)
        I915_PARAMS_FOR_EACH(PRINT);
 #undef PRINT
 }
+
+static __always_inline void dup_param(const char *type, void *x)
+{
+       if (!__builtin_strcmp(type, "char *"))
+               *(void **)x = kstrdup(*(void **)x, GFP_ATOMIC);
+}
+
+void i915_params_copy(struct i915_params *dest, const struct i915_params *src)
+{
+       *dest = *src;
+#define DUP(T, x, ...) dup_param(#T, &dest->x);
+       I915_PARAMS_FOR_EACH(DUP);
+#undef DUP
+}
+
+static __always_inline void free_param(const char *type, void *x)
+{
+       if (!__builtin_strcmp(type, "char *")) {
+               kfree(*(void **)x);
+               *(void **)x = NULL;
+       }
+}
+
+/* free the allocated members, *not* the passed in params itself */
+void i915_params_free(struct i915_params *params)
+{
+#define FREE(T, x, ...) free_param(#T, &params->x);
+       I915_PARAMS_FOR_EACH(FREE);
+#undef FREE
+}
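i915_params_copy() and i915_params_free() get their member lists for free from the I915_PARAMS_FOR_EACH() x-macro, and dup_param()/free_param() dispatch on the *stringified* member type: only "char *" members are deep-copied or freed, and because __builtin_strcmp() folds at compile time the branch vanishes for every non-string member. A self-contained sketch of the same pattern in plain C (strdup stands in for kstrdup; the param list is trimmed for illustration):

#include <stdlib.h>
#include <string.h>

#define PARAMS_FOR_EACH(param) \
	param(int,    modeset,      -1) \
	param(char *, vbt_firmware, NULL)

struct params {
#define MEMBER(T, x, v) T x;
	PARAMS_FOR_EACH(MEMBER)
#undef MEMBER
};

static void dup_param(const char *type, void *x)
{
	/* only pointer-to-char members need a deep copy */
	if (!strcmp(type, "char *") && *(char **)x)
		*(char **)x = strdup(*(char **)x);
}

static void params_copy(struct params *dst, const struct params *src)
{
	*dst = *src;			/* shallow copy everything first */
#define DUP(T, x, ...) dup_param(#T, &dst->x);
	PARAMS_FOR_EACH(DUP)		/* then fix up the string members */
#undef DUP
}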
index 7e56c516c815c269230c697da6166a7fae455fd8..3f14e9881a0d3057877ee309482b8eb4fc55f072 100644
@@ -33,6 +33,15 @@ struct drm_printer;
 #define ENABLE_GUC_SUBMISSION          BIT(0)
 #define ENABLE_GUC_LOAD_HUC            BIT(1)
 
+/*
+ * Invoke param, a function-like macro, for each i915 param, with arguments:
+ *
+ * param(type, name, value)
+ *
+ * type: parameter type, one of {bool, int, unsigned int, char *}
+ * name: name of the parameter
+ * value: initial/default value of the parameter
+ */
 #define I915_PARAMS_FOR_EACH(param) \
        param(char *, vbt_firmware, NULL) \
        param(int, modeset, -1) \
@@ -54,10 +63,10 @@ struct drm_printer;
        param(int, edp_vswing, 0) \
        param(int, reset, 2) \
        param(unsigned int, inject_load_failure, 0) \
+       param(int, fastboot, -1) \
        /* leave bools at the end to not create holes */ \
        param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \
        param(bool, enable_hangcheck, true) \
-       param(bool, fastboot, false) \
        param(bool, prefault_disable, false) \
        param(bool, load_detect_test, false) \
        param(bool, force_reset_modeset_test, false) \
@@ -78,6 +87,8 @@ struct i915_params {
 extern struct i915_params i915_modparams __read_mostly;
 
 void i915_params_dump(const struct i915_params *params, struct drm_printer *p);
+void i915_params_copy(struct i915_params *dest, const struct i915_params *src);
+void i915_params_free(struct i915_params *params);
 
 #endif
 
index 6350db5503cda372cedcda638680dfcec9725a7e..66f82f3f050f34aa1a6756800b2436ceab50c9ab 100644
@@ -26,6 +26,9 @@
 #include <linux/vgaarb.h>
 #include <linux/vga_switcheroo.h>
 
+#include <drm/drm_drv.h>
+
+#include "i915_active.h"
 #include "i915_drv.h"
 #include "i915_selftest.h"
 
 #define BDW_COLORS \
        .color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
 #define CHV_COLORS \
-       .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
+       .color = { .degamma_lut_size = 65, .gamma_lut_size = 257, \
+                  .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
+                  .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
+       }
 #define GLK_COLORS \
-       .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 }
+       .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024, \
+                  .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \
+                                       DRM_COLOR_LUT_EQUAL_CHANNELS, \
+       }
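CHV and GLK now declare validation flags alongside their LUT sizes: DRM_COLOR_LUT_NON_DECREASING rejects gamma/degamma ramps that step backwards, and DRM_COLOR_LUT_EQUAL_CHANNELS (GLK degamma) requires the red, green and blue entries of each element to match, since the hardware ramp is single-channel there. A minimal sketch of the check those flags ask for, assuming struct drm_color_lut and the DRM_COLOR_LUT_* flags from the DRM headers (the helper name is illustrative):

static int check_lut(const struct drm_color_lut *lut, int len, u32 tests)
{
	int i;

	for (i = 0; i < len; i++) {
		if ((tests & DRM_COLOR_LUT_EQUAL_CHANNELS) &&
		    (lut[i].red != lut[i].green || lut[i].red != lut[i].blue))
			return -EINVAL;

		if (i > 0 && (tests & DRM_COLOR_LUT_NON_DECREASING) &&
		    (lut[i].red < lut[i - 1].red ||
		     lut[i].green < lut[i - 1].green ||
		     lut[i].blue < lut[i - 1].blue))
			return -EINVAL;
	}

	return 0;
}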
 
 /* Keep in gen based order, and chronological order within a gen */
 
@@ -81,7 +90,8 @@
        .num_pipes = 1, \
        .display.has_overlay = 1, \
        .display.overlay_needs_physical = 1, \
-       .display.has_gmch_display = 1, \
+       .display.has_gmch = 1, \
+       .gpu_reset_clobbers_display = true, \
        .hws_needs_physical = 1, \
        .unfenced_needs_alignment = 1, \
        .ring_mask = RENDER_RING, \
@@ -121,7 +131,8 @@ static const struct intel_device_info intel_i865g_info = {
 #define GEN3_FEATURES \
        GEN(3), \
        .num_pipes = 2, \
-       .display.has_gmch_display = 1, \
+       .display.has_gmch = 1, \
+       .gpu_reset_clobbers_display = true, \
        .ring_mask = RENDER_RING, \
        .has_snoop = true, \
        .has_coherent_ggtt = true, \
@@ -197,7 +208,8 @@ static const struct intel_device_info intel_pineview_info = {
        GEN(4), \
        .num_pipes = 2, \
        .display.has_hotplug = 1, \
-       .display.has_gmch_display = 1, \
+       .display.has_gmch = 1, \
+       .gpu_reset_clobbers_display = true, \
        .ring_mask = RENDER_RING, \
        .has_snoop = true, \
        .has_coherent_ggtt = true, \
@@ -228,6 +240,7 @@ static const struct intel_device_info intel_g45_info = {
        GEN4_FEATURES,
        PLATFORM(INTEL_G45),
        .ring_mask = RENDER_RING | BSD_RING,
+       .gpu_reset_clobbers_display = false,
 };
 
 static const struct intel_device_info intel_gm45_info = {
@@ -237,6 +250,7 @@ static const struct intel_device_info intel_gm45_info = {
        .display.has_fbc = 1,
        .display.supports_tv = 1,
        .ring_mask = RENDER_RING | BSD_RING,
+       .gpu_reset_clobbers_display = false,
 };
 
 #define GEN5_FEATURES \
@@ -370,7 +384,7 @@ static const struct intel_device_info intel_valleyview_info = {
        .num_pipes = 2,
        .has_runtime_pm = 1,
        .has_rc6 = 1,
-       .display.has_gmch_display = 1,
+       .display.has_gmch = 1,
        .display.has_hotplug = 1,
        .ppgtt = INTEL_PPGTT_FULL,
        .has_snoop = true,
@@ -462,7 +476,7 @@ static const struct intel_device_info intel_cherryview_info = {
        .has_runtime_pm = 1,
        .has_rc6 = 1,
        .has_logical_ring_contexts = 1,
-       .display.has_gmch_display = 1,
+       .display.has_gmch = 1,
        .ppgtt = INTEL_PPGTT_FULL,
        .has_reset_engine = 1,
        .has_snoop = true,
@@ -532,7 +546,6 @@ static const struct intel_device_info intel_skylake_gt4_info = {
        .display.has_fbc = 1, \
        .display.has_psr = 1, \
        .has_runtime_pm = 1, \
-       .has_pooled_eu = 0, \
        .display.has_csr = 1, \
        .has_rc6 = 1, \
        .display.has_dp_mst = 1, \
@@ -701,6 +714,7 @@ static const struct pci_device_id pciidlist[] = {
        INTEL_AML_KBL_GT2_IDS(&intel_kabylake_gt2_info),
        INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info),
        INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info),
+       INTEL_CFL_H_GT1_IDS(&intel_coffeelake_gt1_info),
        INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info),
        INTEL_CFL_U_GT2_IDS(&intel_coffeelake_gt2_info),
        INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info),
@@ -787,6 +801,8 @@ static int __init i915_init(void)
        bool use_kms = true;
        int err;
 
+       i915_global_active_init();
+
        err = i915_mock_selftests();
        if (err)
                return err > 0 ? 0 : err;
@@ -818,6 +834,7 @@ static void __exit i915_exit(void)
                return;
 
        pci_unregister_driver(&i915_pci_driver);
+       i915_global_active_exit();
 }
 
 module_init(i915_init);
index 2b2eb57ca71f2905020aed9f7797684f69c148e7..9ebf99f3d8d3ee524443f3cea90a1ab56e48fe3c 100644
@@ -1365,7 +1365,7 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
        free_oa_buffer(dev_priv);
 
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-       intel_runtime_pm_put(dev_priv);
+       intel_runtime_pm_put(dev_priv, stream->wakeref);
 
        if (stream->ctx)
                oa_put_render_ctx_id(stream);
@@ -1677,6 +1677,11 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
 
                CTX_REG(reg_state, state_offset, flex_regs[i], value);
        }
+
+       CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
+               gen8_make_rpcs(dev_priv,
+                              &to_intel_context(ctx,
+                                                dev_priv->engine[RCS])->sseu));
 }
 
 /*
@@ -1796,7 +1801,7 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
         * be read back from automatically triggered reports, as part of the
         * RPT_ID field.
         */
-       if (IS_GEN(dev_priv, 9, 11)) {
+       if (IS_GEN_RANGE(dev_priv, 9, 11)) {
                I915_WRITE(GEN8_OA_DEBUG,
                           _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
                                              GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
@@ -2087,7 +2092,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
         *   In our case we are expecting that taking pm + FORCEWAKE
         *   references will effectively disable RC6.
         */
-       intel_runtime_pm_get(dev_priv);
+       stream->wakeref = intel_runtime_pm_get(dev_priv);
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
        ret = alloc_oa_buffer(dev_priv);
@@ -2098,21 +2103,21 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
        if (ret)
                goto err_lock;
 
+       stream->ops = &i915_oa_stream_ops;
+       dev_priv->perf.oa.exclusive_stream = stream;
+
        ret = dev_priv->perf.oa.ops.enable_metric_set(stream);
        if (ret) {
                DRM_DEBUG("Unable to enable metric set\n");
                goto err_enable;
        }
 
-       stream->ops = &i915_oa_stream_ops;
-
-       dev_priv->perf.oa.exclusive_stream = stream;
-
        mutex_unlock(&dev_priv->drm.struct_mutex);
 
        return 0;
 
 err_enable:
+       dev_priv->perf.oa.exclusive_stream = NULL;
        dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
        mutex_unlock(&dev_priv->drm.struct_mutex);
 
@@ -2123,7 +2128,7 @@ err_oa_buf_alloc:
        put_oa_config(dev_priv, stream->oa_config);
 
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-       intel_runtime_pm_put(dev_priv);
+       intel_runtime_pm_put(dev_priv, stream->wakeref);
 
 err_config:
        if (stream->ctx)
@@ -2646,7 +2651,7 @@ err:
 static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
 {
        return div64_u64(1000000000ULL * (2ULL << exponent),
-                        1000ULL * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz);
+                        1000ULL * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
 }
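oa_exponent_to_ns() turns the OA timer exponent into a sampling period: the hardware emits a report every (2ULL << exponent) timestamp ticks, scaled by the command-streamer timestamp frequency now read via RUNTIME_INFO(). A worked example, assuming a 12 MHz timestamp clock (the frequency here is illustrative; the real value is probed per platform):

/*
 * cs_timestamp_frequency_khz = 12000		(12 MHz, illustrative)
 * exponent                   = 5
 *
 * ticks per report = 2 << 5			= 64
 * period           = 64 * 1e9 / 12e6 ns	~= 5333 ns
 * sample rate      = 1e9 / 5333		~= 187.5 kHz
 */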
 
 /**
@@ -3021,7 +3026,7 @@ static bool chv_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
                (addr >= 0x182300 && addr <= 0x1823A4);
 }
 
-static uint32_t mask_reg_value(u32 reg, u32 val)
+static u32 mask_reg_value(u32 reg, u32 val)
 {
        /* HALF_SLICE_CHICKEN2 is programmed with the
         * WaDisableSTUnitPowerOptimization workaround. Make sure the value
@@ -3415,7 +3420,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
                dev_priv->perf.oa.ops.read = gen8_oa_read;
                dev_priv->perf.oa.ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
 
-               if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv)) {
+               if (IS_GEN_RANGE(dev_priv, 8, 9)) {
                        dev_priv->perf.oa.ops.is_valid_b_counter_reg =
                                gen7_is_valid_b_counter_addr;
                        dev_priv->perf.oa.ops.is_valid_mux_reg =
@@ -3431,7 +3436,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
                        dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
                        dev_priv->perf.oa.ops.disable_metric_set = gen8_disable_metric_set;
 
-                       if (IS_GEN8(dev_priv)) {
+                       if (IS_GEN(dev_priv, 8)) {
                                dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120;
                                dev_priv->perf.oa.ctx_flexeu0_offset = 0x2ce;
 
@@ -3442,7 +3447,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
 
                                dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
                        }
-               } else if (IS_GEN(dev_priv, 10, 11)) {
+               } else if (IS_GEN_RANGE(dev_priv, 10, 11)) {
                        dev_priv->perf.oa.ops.is_valid_b_counter_reg =
                                gen7_is_valid_b_counter_addr;
                        dev_priv->perf.oa.ops.is_valid_mux_reg =
@@ -3471,7 +3476,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
                spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock);
 
                oa_sample_rate_hard_limit = 1000 *
-                       (INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
+                       (RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
                dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);
 
                mutex_init(&dev_priv->perf.metrics_lock);
index d6c8f8fdfda5f106776e0a148e034e10e64ccbb7..13d70b90dd0f9bebb3130b3116fcf5585f667ec5 100644
@@ -167,6 +167,7 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
 {
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
+       intel_wakeref_t wakeref;
        bool fw = false;
 
        if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
@@ -175,7 +176,8 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
        if (!dev_priv->gt.awake)
                return;
 
-       if (!intel_runtime_pm_get_if_in_use(dev_priv))
+       wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
+       if (!wakeref)
                return;
 
        for_each_engine(engine, dev_priv, id) {
@@ -210,7 +212,7 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
        if (fw)
                intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
-       intel_runtime_pm_put(dev_priv);
+       intel_runtime_pm_put(dev_priv, wakeref);
 }
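The intel_runtime_pm_get*() family now returns an intel_wakeref_t cookie, and intel_runtime_pm_put() takes that cookie back, so every release can be matched to the acquire that produced it. The shape of the idiom, as engines_sample() above uses it:

	intel_wakeref_t wakeref;

	/* conditional acquire: sample only if the device is already awake */
	wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
	if (!wakeref)
		return;

	/* ... touch the hardware ... */

	intel_runtime_pm_put(dev_priv, wakeref);	/* hand the cookie back */

frequency_sample() just below expresses the same acquire/release pair as a scoped block via with_intel_runtime_pm_if_in_use().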
 
 static void
@@ -227,11 +229,12 @@ frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
                u32 val;
 
                val = dev_priv->gt_pm.rps.cur_freq;
-               if (dev_priv->gt.awake &&
-                   intel_runtime_pm_get_if_in_use(dev_priv)) {
-                       val = intel_get_cagf(dev_priv,
-                                            I915_READ_NOTRACE(GEN6_RPSTAT1));
-                       intel_runtime_pm_put(dev_priv);
+               if (dev_priv->gt.awake) {
+                       intel_wakeref_t wakeref;
+
+                       with_intel_runtime_pm_if_in_use(dev_priv, wakeref)
+                               val = intel_get_cagf(dev_priv,
+                                                    I915_READ_NOTRACE(GEN6_RPSTAT1));
                }
 
                add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
@@ -443,12 +446,14 @@ static u64 __get_rc6(struct drm_i915_private *i915)
 static u64 get_rc6(struct drm_i915_private *i915)
 {
 #if IS_ENABLED(CONFIG_PM)
+       intel_wakeref_t wakeref;
        unsigned long flags;
        u64 val;
 
-       if (intel_runtime_pm_get_if_in_use(i915)) {
+       wakeref = intel_runtime_pm_get_if_in_use(i915);
+       if (wakeref) {
                val = __get_rc6(i915);
-               intel_runtime_pm_put(i915);
+               intel_runtime_pm_put(i915, wakeref);
 
                /*
                 * If we are coming back from being runtime suspended we must
@@ -594,7 +599,8 @@ static void i915_pmu_enable(struct perf_event *event)
         * Update the bitmask of enabled events and increment
         * the event reference counter.
         */
-       GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
+       BUILD_BUG_ON(ARRAY_SIZE(i915->pmu.enable_count) != I915_PMU_MASK_BITS);
+       GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count));
        GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0);
        i915->pmu.enable |= BIT_ULL(bit);
        i915->pmu.enable_count[bit]++;
@@ -615,11 +621,16 @@ static void i915_pmu_enable(struct perf_event *event)
                engine = intel_engine_lookup_user(i915,
                                                  engine_event_class(event),
                                                  engine_event_instance(event));
-               GEM_BUG_ON(!engine);
-               engine->pmu.enable |= BIT(sample);
 
-               GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
+               BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) !=
+                            I915_ENGINE_SAMPLE_COUNT);
+               BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) !=
+                            I915_ENGINE_SAMPLE_COUNT);
+               GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
+               GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
                GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
+
+               engine->pmu.enable |= BIT(sample);
                engine->pmu.enable_count[sample]++;
        }
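The PMU hunks above replace bound checks against free-floating constants (I915_PMU_MASK_BITS, I915_PMU_SAMPLE_BITS) with checks derived from the arrays actually being indexed: BUILD_BUG_ON() pins the named constant to the array size at compile time, and the runtime asserts bound the index against the array itself, so the two can never drift apart silently. A minimal sketch of the pattern (names and the WARN-based fallback are illustrative):

#include <linux/build_bug.h>
#include <linux/kernel.h>
#include <linux/types.h>

#define SAMPLE_COUNT 4			/* illustrative constant */

static u8 counters[SAMPLE_COUNT];

static void bump_counter(unsigned int idx)
{
	/* compile-time: the named constant must match the real array size */
	BUILD_BUG_ON(ARRAY_SIZE(counters) != SAMPLE_COUNT);

	/* runtime: bound the index against the array, not the constant */
	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(counters)))
		return;

	counters[idx]++;
}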
 
@@ -649,9 +660,11 @@ static void i915_pmu_disable(struct perf_event *event)
                engine = intel_engine_lookup_user(i915,
                                                  engine_event_class(event),
                                                  engine_event_instance(event));
-               GEM_BUG_ON(!engine);
-               GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
+
+               GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
+               GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
                GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);
+
                /*
                 * Decrement the reference count and clear the enabled
                 * bitmask when the last listener on an event goes away.
@@ -660,7 +673,7 @@ static void i915_pmu_disable(struct perf_event *event)
                        engine->pmu.enable &= ~BIT(sample);
        }
 
-       GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
+       GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count));
        GEM_BUG_ON(i915->pmu.enable_count[bit] == 0);
        /*
         * Decrement the reference count and clear the enabled
index 7f164ca3db129472d3262439f5290d505ea6e14a..b3728c5f13e739f7c3a1e5c0d5e1a55deca1c342 100644
@@ -31,6 +31,8 @@ enum {
        ((1 << I915_PMU_SAMPLE_BITS) + \
         (I915_PMU_LAST + 1 - __I915_PMU_OTHER(0)))
 
+#define I915_ENGINE_SAMPLE_COUNT (I915_SAMPLE_SEMA + 1)
+
 struct i915_pmu_sample {
        u64 cur;
 };
index fe56465cdfd67512aca73d3cdf0bc3b7e7f6bf2f..cbcb957b7141d768541853129cdc029d5f804029 100644
@@ -13,7 +13,7 @@
 static int query_topology_info(struct drm_i915_private *dev_priv,
                               struct drm_i915_query_item *query_item)
 {
-       const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
+       const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
        struct drm_i915_query_topology_info topo;
        u32 slice_length, subslice_length, eu_length, total_length;
 
index 0a7d60509ca7527f018a12208316bff91c7c6be8..638a586469f97be9fb83bbbcb152c518e7d46e1e 100644
  */
 
 typedef struct {
-       uint32_t reg;
+       u32 reg;
 } i915_reg_t;
 
 #define _MMIO(r) ((const i915_reg_t){ .reg = (r) })
 
 #define INVALID_MMIO_REG _MMIO(0)
 
-static inline uint32_t i915_mmio_reg_offset(i915_reg_t reg)
+static inline u32 i915_mmio_reg_offset(i915_reg_t reg)
 {
        return reg.reg;
 }
@@ -139,6 +139,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
        return !i915_mmio_reg_equal(reg, INVALID_MMIO_REG);
 }
 
+#define VLV_DISPLAY_BASE               0x180000
+#define VLV_MIPI_BASE                  VLV_DISPLAY_BASE
+#define BXT_MIPI_BASE                  0x60000
+
+#define DISPLAY_MMIO_BASE(dev_priv)    (INTEL_INFO(dev_priv)->display_mmio_offset)
+
 /*
  * Given the first two numbers __a and __b of arbitrarily many evenly spaced
  * numbers, pick the 0-based __index'th value.
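This comment sits above the helper that nearly every per-pipe/per-port macro in the file reduces to: given the first two of arbitrarily many evenly spaced values, _PICK_EVEN() recovers any later one by linear extrapolation, so only two base offsets ever need to be spelled out. Its definition is essentially:

#define _PICK_EVEN(__index, __a, __b) ((__a) + (__index) * ((__b) - (__a)))

/*
 * Example: pipe registers spaced 0x1000 apart.
 *   _PICK_EVEN(0, 0x70000, 0x71000) = 0x70000	pipe A
 *   _PICK_EVEN(1, 0x70000, 0x71000) = 0x71000	pipe B
 *   _PICK_EVEN(2, 0x70000, 0x71000) = 0x72000	pipe C, extrapolated
 */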
@@ -179,15 +185,15 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
  * Device info offset array based helpers for groups of registers with unevenly
  * spaced base offsets.
  */
-#define _MMIO_PIPE2(pipe, reg)         _MMIO(dev_priv->info.pipe_offsets[pipe] - \
-                                             dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \
-                                             dev_priv->info.display_mmio_offset)
-#define _MMIO_TRANS2(pipe, reg)                _MMIO(dev_priv->info.trans_offsets[(pipe)] - \
-                                             dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \
-                                             dev_priv->info.display_mmio_offset)
-#define _CURSOR2(pipe, reg)            _MMIO(dev_priv->info.cursor_offsets[(pipe)] - \
-                                             dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \
-                                             dev_priv->info.display_mmio_offset)
+#define _MMIO_PIPE2(pipe, reg)         _MMIO(INTEL_INFO(dev_priv)->pipe_offsets[pipe] - \
+                                             INTEL_INFO(dev_priv)->pipe_offsets[PIPE_A] + (reg) + \
+                                             DISPLAY_MMIO_BASE(dev_priv))
+#define _MMIO_TRANS2(pipe, reg)                _MMIO(INTEL_INFO(dev_priv)->trans_offsets[(pipe)] - \
+                                             INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_A] + (reg) + \
+                                             DISPLAY_MMIO_BASE(dev_priv))
+#define _CURSOR2(pipe, reg)            _MMIO(INTEL_INFO(dev_priv)->cursor_offsets[(pipe)] - \
+                                             INTEL_INFO(dev_priv)->cursor_offsets[PIPE_A] + (reg) + \
+                                             DISPLAY_MMIO_BASE(dev_priv))
 
 #define __MASKED_FIELD(mask, value) ((mask) << 16 | (value))
 #define _MASKED_FIELD(mask, value) ({                                     \
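__MASKED_FIELD() encodes the write-mask convention many GT registers use: the top 16 bits of a write select which of the low 16 bits actually change, so no read-modify-write cycle is needed. The _MASKED_BIT_ENABLE() call in the GEN8_OA_DEBUG hunk earlier expands along these lines (simplified; the real macros add type checking):

/*
 * _MASKED_BIT_ENABLE(a)  ~= __MASKED_FIELD((a), (a))
 * _MASKED_BIT_DISABLE(a) ~= __MASKED_FIELD((a), 0)
 *
 * _MASKED_BIT_ENABLE(1 << 4)  -> 0x00100010: touch bit 4, set it
 * _MASKED_BIT_DISABLE(1 << 4) -> 0x00100000: touch bit 4, clear it
 * Bits whose mask half is zero are left as-is by the hardware.
 */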
@@ -347,6 +353,24 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define  GEN11_GRDOM_MEDIA4            (1 << 8)
 #define  GEN11_GRDOM_VECS              (1 << 13)
 #define  GEN11_GRDOM_VECS2             (1 << 14)
+#define  GEN11_GRDOM_SFC0              (1 << 17)
+#define  GEN11_GRDOM_SFC1              (1 << 18)
+
+#define  GEN11_VCS_SFC_RESET_BIT(instance)     (GEN11_GRDOM_SFC0 << ((instance) >> 1))
+#define  GEN11_VECS_SFC_RESET_BIT(instance)    (GEN11_GRDOM_SFC0 << (instance))
+
+#define GEN11_VCS_SFC_FORCED_LOCK(engine)      _MMIO((engine)->mmio_base + 0x88C)
+#define   GEN11_VCS_SFC_FORCED_LOCK_BIT                (1 << 0)
+#define GEN11_VCS_SFC_LOCK_STATUS(engine)      _MMIO((engine)->mmio_base + 0x890)
+#define   GEN11_VCS_SFC_USAGE_BIT              (1 << 0)
+#define   GEN11_VCS_SFC_LOCK_ACK_BIT           (1 << 1)
+
+#define GEN11_VECS_SFC_FORCED_LOCK(engine)     _MMIO((engine)->mmio_base + 0x201C)
+#define   GEN11_VECS_SFC_FORCED_LOCK_BIT       (1 << 0)
+#define GEN11_VECS_SFC_LOCK_ACK(engine)                _MMIO((engine)->mmio_base + 0x2018)
+#define   GEN11_VECS_SFC_LOCK_ACK_BIT          (1 << 0)
+#define GEN11_VECS_SFC_USAGE(engine)           _MMIO((engine)->mmio_base + 0x2014)
+#define   GEN11_VECS_SFC_USAGE_BIT             (1 << 0)
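The shift arithmetic in the new SFC macros encodes the gen11 sharing topology: each Scaler & Format Converter serves a pair of video-decode engines, while each video-enhance engine has its own, hence instance >> 1 for VCS and a plain instance shift for VECS. Worked out:

/*
 * GEN11_VCS_SFC_RESET_BIT(instance)  = GEN11_GRDOM_SFC0 << (instance >> 1)
 *   VCS0, VCS1 -> SFC0	(0 >> 1 == 1 >> 1 == 0)
 *   VCS2, VCS3 -> SFC1	(2 >> 1 == 3 >> 1 == 1)
 *
 * GEN11_VECS_SFC_RESET_BIT(instance) = GEN11_GRDOM_SFC0 << instance
 *   VECS0 -> SFC0, VECS1 -> SFC1
 */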
 
 #define RING_PP_DIR_BASE(engine)       _MMIO((engine)->mmio_base + 0x228)
 #define RING_PP_DIR_BASE_READ(engine)  _MMIO((engine)->mmio_base + 0x518)
@@ -1790,7 +1814,7 @@ enum i915_power_well_id {
 #define _CNL_PORT_TX_C_LN0_OFFSET              0x162C40
 #define _CNL_PORT_TX_D_LN0_OFFSET              0x162E40
 #define _CNL_PORT_TX_F_LN0_OFFSET              0x162840
-#define _CNL_PORT_TX_DW_GRP(port, dw)  (_PICK((port), \
+#define _CNL_PORT_TX_DW_GRP(dw, port)  (_PICK((port), \
                                               _CNL_PORT_TX_AE_GRP_OFFSET, \
                                               _CNL_PORT_TX_B_GRP_OFFSET, \
                                               _CNL_PORT_TX_B_GRP_OFFSET, \
@@ -1798,7 +1822,7 @@ enum i915_power_well_id {
                                               _CNL_PORT_TX_AE_GRP_OFFSET, \
                                               _CNL_PORT_TX_F_GRP_OFFSET) + \
                                               4 * (dw))
-#define _CNL_PORT_TX_DW_LN0(port, dw)  (_PICK((port), \
+#define _CNL_PORT_TX_DW_LN0(dw, port)  (_PICK((port), \
                                               _CNL_PORT_TX_AE_LN0_OFFSET, \
                                               _CNL_PORT_TX_B_LN0_OFFSET, \
                                               _CNL_PORT_TX_B_LN0_OFFSET, \
@@ -1834,9 +1858,9 @@ enum i915_power_well_id {
 
 #define _CNL_PORT_TX_DW4_LN0_AE                0x162450
 #define _CNL_PORT_TX_DW4_LN1_AE                0x1624D0
-#define CNL_PORT_TX_DW4_GRP(port)      _MMIO(_CNL_PORT_TX_DW_GRP((port), 4))
-#define CNL_PORT_TX_DW4_LN0(port)      _MMIO(_CNL_PORT_TX_DW_LN0((port), 4))
-#define CNL_PORT_TX_DW4_LN(port, ln)   _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \
+#define CNL_PORT_TX_DW4_GRP(port)      _MMIO(_CNL_PORT_TX_DW_GRP(4, (port)))
+#define CNL_PORT_TX_DW4_LN0(port)      _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)))
+#define CNL_PORT_TX_DW4_LN(port, ln)   _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \
                                           ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \
                                                    _CNL_PORT_TX_DW4_LN0_AE)))
 #define ICL_PORT_TX_DW4_AUX(port)      _MMIO(_ICL_PORT_TX_DW_AUX(4, port))
@@ -1864,8 +1888,12 @@ enum i915_power_well_id {
 #define   RTERM_SELECT(x)              ((x) << 3)
 #define   RTERM_SELECT_MASK            (0x7 << 3)
 
-#define CNL_PORT_TX_DW7_GRP(port)      _MMIO(_CNL_PORT_TX_DW_GRP((port), 7))
-#define CNL_PORT_TX_DW7_LN0(port)      _MMIO(_CNL_PORT_TX_DW_LN0((port), 7))
+#define CNL_PORT_TX_DW7_GRP(port)      _MMIO(_CNL_PORT_TX_DW_GRP(7, (port)))
+#define CNL_PORT_TX_DW7_LN0(port)      _MMIO(_CNL_PORT_TX_DW_LN0(7, (port)))
+#define ICL_PORT_TX_DW7_AUX(port)      _MMIO(_ICL_PORT_TX_DW_AUX(7, port))
+#define ICL_PORT_TX_DW7_GRP(port)      _MMIO(_ICL_PORT_TX_DW_GRP(7, port))
+#define ICL_PORT_TX_DW7_LN0(port)      _MMIO(_ICL_PORT_TX_DW_LN(7, 0, port))
+#define ICL_PORT_TX_DW7_LN(port, ln)   _MMIO(_ICL_PORT_TX_DW_LN(7, ln, port))
 #define   N_SCALAR(x)                  ((x) << 24)
 #define   N_SCALAR_MASK                        (0x7F << 24)
 
@@ -2592,10 +2620,6 @@ enum i915_power_well_id {
 
 #define   GEN11_GFX_DISABLE_LEGACY_MODE        (1 << 3)
 
-#define VLV_DISPLAY_BASE 0x180000
-#define VLV_MIPI_BASE VLV_DISPLAY_BASE
-#define BXT_MIPI_BASE 0x60000
-
 #define VLV_GU_CTL0    _MMIO(VLV_DISPLAY_BASE + 0x2030)
 #define VLV_GU_CTL1    _MMIO(VLV_DISPLAY_BASE + 0x2034)
 #define SCPD0          _MMIO(0x209c) /* 915+ only */
@@ -2777,6 +2801,9 @@ enum i915_power_well_id {
 #define GEN6_RCS_PWR_FSM _MMIO(0x22ac)
 #define GEN9_RCS_FE_FSM2 _MMIO(0x22a4)
 
+#define GEN10_CACHE_MODE_SS                    _MMIO(0xe420)
+#define   FLOAT_BLEND_OPTIMIZATION_ENABLE      (1 << 4)
+
 /* Fuse readout registers for GT */
 #define HSW_PAVP_FUSE1                 _MMIO(0x911C)
 #define   HSW_F1_EU_DIS_SHIFT          16
@@ -3152,9 +3179,9 @@ enum i915_power_well_id {
 /*
  * Clock control & power management
  */
-#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014)
-#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018)
-#define _CHV_DPLL_C (dev_priv->info.display_mmio_offset + 0x6030)
+#define _DPLL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x6014)
+#define _DPLL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x6018)
+#define _CHV_DPLL_C (DISPLAY_MMIO_BASE(dev_priv) + 0x6030)
 #define DPLL(pipe) _MMIO_PIPE3((pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C)
 
 #define VGA0   _MMIO(0x6000)
@@ -3251,9 +3278,9 @@ enum i915_power_well_id {
 #define   SDVO_MULTIPLIER_SHIFT_HIRES          4
 #define   SDVO_MULTIPLIER_SHIFT_VGA            0
 
-#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c)
-#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020)
-#define _CHV_DPLL_C_MD (dev_priv->info.display_mmio_offset + 0x603c)
+#define _DPLL_A_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x601c)
+#define _DPLL_B_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x6020)
+#define _CHV_DPLL_C_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x603c)
 #define DPLL_MD(pipe) _MMIO_PIPE3((pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD)
 
 /*
@@ -3325,7 +3352,7 @@ enum i915_power_well_id {
 #define  DSTATE_PLL_D3_OFF                     (1 << 3)
 #define  DSTATE_GFX_CLOCK_GATING               (1 << 1)
 #define  DSTATE_DOT_CLOCK_GATING               (1 << 0)
-#define DSPCLK_GATE_D  _MMIO(dev_priv->info.display_mmio_offset + 0x6200)
+#define DSPCLK_GATE_D  _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x6200)
 # define DPUNIT_B_CLOCK_GATE_DISABLE           (1 << 30) /* 965 */
 # define VSUNIT_CLOCK_GATE_DISABLE             (1 << 29) /* 965 */
 # define VRHUNIT_CLOCK_GATE_DISABLE            (1 << 28) /* 965 */
@@ -3465,7 +3492,7 @@ enum i915_power_well_id {
 #define _PALETTE_A             0xa000
 #define _PALETTE_B             0xa800
 #define _CHV_PALETTE_C         0xc000
-#define PALETTE(pipe, i)       _MMIO(dev_priv->info.display_mmio_offset + \
+#define PALETTE(pipe, i)       _MMIO(DISPLAY_MMIO_BASE(dev_priv) + \
                                      _PICK((pipe), _PALETTE_A,         \
                                            _PALETTE_B, _CHV_PALETTE_C) + \
                                      (i) * 4)
@@ -4248,6 +4275,15 @@ enum {
 #define EDP_PSR2_STATUS_STATE_MASK     (0xf << 28)
 #define EDP_PSR2_STATUS_STATE_SHIFT    28
 
+#define _PSR2_SU_STATUS_0              0x6F914
+#define _PSR2_SU_STATUS_1              0x6F918
+#define _PSR2_SU_STATUS_2              0x6F91C
+#define _PSR2_SU_STATUS(index)         _MMIO(_PICK_EVEN((index), _PSR2_SU_STATUS_0, _PSR2_SU_STATUS_1))
+#define PSR2_SU_STATUS(frame)          (_PSR2_SU_STATUS((frame) / 3))
+#define PSR2_SU_STATUS_SHIFT(frame)    (((frame) % 3) * 10)
+#define PSR2_SU_STATUS_MASK(frame)     (0x3ff << PSR2_SU_STATUS_SHIFT(frame))
+#define PSR2_SU_STATUS_FRAMES          8
+
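PSR2_SU_STATUS packs three 10-bit selective-update block counts per 32-bit register, with PSR2_SU_STATUS_FRAMES (8) frames spread across the three registers; frame / 3 picks the register and frame % 3 the field within it. A minimal readout sketch using the file's own conventions (the loop itself is illustrative, not taken from this diff):

	u32 frame;

	for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
		u32 val = I915_READ(PSR2_SU_STATUS(frame));
		int i;

		/* up to three 10-bit SU block counts per register */
		for (i = 0; i < 3 && frame + i < PSR2_SU_STATUS_FRAMES; i++) {
			u32 su = val & PSR2_SU_STATUS_MASK(frame + i);

			su >>= PSR2_SU_STATUS_SHIFT(frame + i);
			/* ... report su blocks for frame + i ... */
		}
	}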
 /* VGA port control */
 #define ADPA                   _MMIO(0x61100)
 #define PCH_ADPA                _MMIO(0xe1100)
@@ -4298,7 +4334,7 @@ enum {
 
 
 /* Hotplug control (945+ only) */
-#define PORT_HOTPLUG_EN                _MMIO(dev_priv->info.display_mmio_offset + 0x61110)
+#define PORT_HOTPLUG_EN                _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61110)
 #define   PORTB_HOTPLUG_INT_EN                 (1 << 29)
 #define   PORTC_HOTPLUG_INT_EN                 (1 << 28)
 #define   PORTD_HOTPLUG_INT_EN                 (1 << 27)
@@ -4328,7 +4364,7 @@ enum {
 #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV       (0 << 2)
 #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV       (1 << 2)
 
-#define PORT_HOTPLUG_STAT      _MMIO(dev_priv->info.display_mmio_offset + 0x61114)
+#define PORT_HOTPLUG_STAT      _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61114)
 /*
  * HDMI/DP bits are g4x+
  *
@@ -4410,7 +4446,7 @@ enum {
 
 #define PORT_DFT_I9XX                          _MMIO(0x61150)
 #define   DC_BALANCE_RESET                     (1 << 25)
-#define PORT_DFT2_G4X          _MMIO(dev_priv->info.display_mmio_offset + 0x61154)
+#define PORT_DFT2_G4X          _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61154)
 #define   DC_BALANCE_RESET_VLV                 (1 << 31)
 #define   PIPE_SCRAMBLE_RESET_MASK             ((1 << 14) | (0x3 << 0))
 #define   PIPE_C_SCRAMBLE_RESET                        (1 << 14) /* chv */
@@ -4663,7 +4699,6 @@ enum {
 #define  EDP_FORCE_VDD                 (1 << 3)
 #define  EDP_BLC_ENABLE                        (1 << 2)
 #define  PANEL_POWER_RESET             (1 << 1)
-#define  PANEL_POWER_OFF               (0 << 0)
 #define  PANEL_POWER_ON                        (1 << 0)
 
 #define _PP_ON_DELAYS                  0x61208
@@ -4695,7 +4730,7 @@ enum {
 #define  PANEL_POWER_CYCLE_DELAY_SHIFT 0
 
 /* Panel fitting */
-#define PFIT_CONTROL   _MMIO(dev_priv->info.display_mmio_offset + 0x61230)
+#define PFIT_CONTROL   _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61230)
 #define   PFIT_ENABLE          (1 << 31)
 #define   PFIT_PIPE_MASK       (3 << 29)
 #define   PFIT_PIPE_SHIFT      29
@@ -4713,7 +4748,7 @@ enum {
 #define   PFIT_SCALING_PROGRAMMED (1 << 26)
 #define   PFIT_SCALING_PILLAR  (2 << 26)
 #define   PFIT_SCALING_LETTER  (3 << 26)
-#define PFIT_PGM_RATIOS _MMIO(dev_priv->info.display_mmio_offset + 0x61234)
+#define PFIT_PGM_RATIOS _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61234)
 /* Pre-965 */
 #define                PFIT_VERT_SCALE_SHIFT           20
 #define                PFIT_VERT_SCALE_MASK            0xfff00000
@@ -4725,25 +4760,25 @@ enum {
 #define                PFIT_HORIZ_SCALE_SHIFT_965      0
 #define                PFIT_HORIZ_SCALE_MASK_965       0x00001fff
 
-#define PFIT_AUTO_RATIOS _MMIO(dev_priv->info.display_mmio_offset + 0x61238)
+#define PFIT_AUTO_RATIOS _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61238)
 
-#define _VLV_BLC_PWM_CTL2_A (dev_priv->info.display_mmio_offset + 0x61250)
-#define _VLV_BLC_PWM_CTL2_B (dev_priv->info.display_mmio_offset + 0x61350)
+#define _VLV_BLC_PWM_CTL2_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61250)
+#define _VLV_BLC_PWM_CTL2_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61350)
 #define VLV_BLC_PWM_CTL2(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
                                         _VLV_BLC_PWM_CTL2_B)
 
-#define _VLV_BLC_PWM_CTL_A (dev_priv->info.display_mmio_offset + 0x61254)
-#define _VLV_BLC_PWM_CTL_B (dev_priv->info.display_mmio_offset + 0x61354)
+#define _VLV_BLC_PWM_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
+#define _VLV_BLC_PWM_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61354)
 #define VLV_BLC_PWM_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
                                        _VLV_BLC_PWM_CTL_B)
 
-#define _VLV_BLC_HIST_CTL_A (dev_priv->info.display_mmio_offset + 0x61260)
-#define _VLV_BLC_HIST_CTL_B (dev_priv->info.display_mmio_offset + 0x61360)
+#define _VLV_BLC_HIST_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
+#define _VLV_BLC_HIST_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61360)
 #define VLV_BLC_HIST_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
                                         _VLV_BLC_HIST_CTL_B)
 
 /* Backlight control */
-#define BLC_PWM_CTL2   _MMIO(dev_priv->info.display_mmio_offset + 0x61250) /* 965+ only */
+#define BLC_PWM_CTL2   _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61250) /* 965+ only */
 #define   BLM_PWM_ENABLE               (1 << 31)
 #define   BLM_COMBINATION_MODE         (1 << 30) /* gen4 only */
 #define   BLM_PIPE_SELECT              (1 << 29)
@@ -4766,7 +4801,7 @@ enum {
 #define   BLM_PHASE_IN_COUNT_MASK      (0xff << 8)
 #define   BLM_PHASE_IN_INCR_SHIFT      (0)
 #define   BLM_PHASE_IN_INCR_MASK       (0xff << 0)
-#define BLC_PWM_CTL    _MMIO(dev_priv->info.display_mmio_offset + 0x61254)
+#define BLC_PWM_CTL    _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
 /*
  * This is the most significant 15 bits of the number of backlight cycles in a
  * complete cycle of the modulated backlight control.
@@ -4788,7 +4823,7 @@ enum {
 #define   BACKLIGHT_DUTY_CYCLE_MASK_PNV                (0xfffe)
 #define   BLM_POLARITY_PNV                     (1 << 0) /* pnv only */
 
-#define BLC_HIST_CTL   _MMIO(dev_priv->info.display_mmio_offset + 0x61260)
+#define BLC_HIST_CTL   _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
 #define  BLM_HISTOGRAM_ENABLE                  (1 << 31)
 
 /* New registers for PCH-split platforms. Safe where new bits show up, the
@@ -4863,6 +4898,7 @@ enum {
 # define TV_OVERSAMPLE_NONE            (2 << 18)
 /* Selects 8x oversampling */
 # define TV_OVERSAMPLE_8X              (3 << 18)
+# define TV_OVERSAMPLE_MASK            (3 << 18)
 /* Selects progressive mode rather than interlaced */
 # define TV_PROGRESSIVE                        (1 << 17)
 /* Sets the colorburst to PAL mode.  Required for non-M PAL modes. */
@@ -5412,47 +5448,47 @@ enum {
  * is 20 bytes in each direction, hence the 5 fixed
  * data registers
  */
-#define _DPA_AUX_CH_CTL                (dev_priv->info.display_mmio_offset + 0x64010)
-#define _DPA_AUX_CH_DATA1      (dev_priv->info.display_mmio_offset + 0x64014)
-#define _DPA_AUX_CH_DATA2      (dev_priv->info.display_mmio_offset + 0x64018)
-#define _DPA_AUX_CH_DATA3      (dev_priv->info.display_mmio_offset + 0x6401c)
-#define _DPA_AUX_CH_DATA4      (dev_priv->info.display_mmio_offset + 0x64020)
-#define _DPA_AUX_CH_DATA5      (dev_priv->info.display_mmio_offset + 0x64024)
-
-#define _DPB_AUX_CH_CTL                (dev_priv->info.display_mmio_offset + 0x64110)
-#define _DPB_AUX_CH_DATA1      (dev_priv->info.display_mmio_offset + 0x64114)
-#define _DPB_AUX_CH_DATA2      (dev_priv->info.display_mmio_offset + 0x64118)
-#define _DPB_AUX_CH_DATA3      (dev_priv->info.display_mmio_offset + 0x6411c)
-#define _DPB_AUX_CH_DATA4      (dev_priv->info.display_mmio_offset + 0x64120)
-#define _DPB_AUX_CH_DATA5      (dev_priv->info.display_mmio_offset + 0x64124)
-
-#define _DPC_AUX_CH_CTL                (dev_priv->info.display_mmio_offset + 0x64210)
-#define _DPC_AUX_CH_DATA1      (dev_priv->info.display_mmio_offset + 0x64214)
-#define _DPC_AUX_CH_DATA2      (dev_priv->info.display_mmio_offset + 0x64218)
-#define _DPC_AUX_CH_DATA3      (dev_priv->info.display_mmio_offset + 0x6421c)
-#define _DPC_AUX_CH_DATA4      (dev_priv->info.display_mmio_offset + 0x64220)
-#define _DPC_AUX_CH_DATA5      (dev_priv->info.display_mmio_offset + 0x64224)
-
-#define _DPD_AUX_CH_CTL                (dev_priv->info.display_mmio_offset + 0x64310)
-#define _DPD_AUX_CH_DATA1      (dev_priv->info.display_mmio_offset + 0x64314)
-#define _DPD_AUX_CH_DATA2      (dev_priv->info.display_mmio_offset + 0x64318)
-#define _DPD_AUX_CH_DATA3      (dev_priv->info.display_mmio_offset + 0x6431c)
-#define _DPD_AUX_CH_DATA4      (dev_priv->info.display_mmio_offset + 0x64320)
-#define _DPD_AUX_CH_DATA5      (dev_priv->info.display_mmio_offset + 0x64324)
-
-#define _DPE_AUX_CH_CTL                (dev_priv->info.display_mmio_offset + 0x64410)
-#define _DPE_AUX_CH_DATA1      (dev_priv->info.display_mmio_offset + 0x64414)
-#define _DPE_AUX_CH_DATA2      (dev_priv->info.display_mmio_offset + 0x64418)
-#define _DPE_AUX_CH_DATA3      (dev_priv->info.display_mmio_offset + 0x6441c)
-#define _DPE_AUX_CH_DATA4      (dev_priv->info.display_mmio_offset + 0x64420)
-#define _DPE_AUX_CH_DATA5      (dev_priv->info.display_mmio_offset + 0x64424)
-
-#define _DPF_AUX_CH_CTL                (dev_priv->info.display_mmio_offset + 0x64510)
-#define _DPF_AUX_CH_DATA1      (dev_priv->info.display_mmio_offset + 0x64514)
-#define _DPF_AUX_CH_DATA2      (dev_priv->info.display_mmio_offset + 0x64518)
-#define _DPF_AUX_CH_DATA3      (dev_priv->info.display_mmio_offset + 0x6451c)
-#define _DPF_AUX_CH_DATA4      (dev_priv->info.display_mmio_offset + 0x64520)
-#define _DPF_AUX_CH_DATA5      (dev_priv->info.display_mmio_offset + 0x64524)
+#define _DPA_AUX_CH_CTL                (DISPLAY_MMIO_BASE(dev_priv) + 0x64010)
+#define _DPA_AUX_CH_DATA1      (DISPLAY_MMIO_BASE(dev_priv) + 0x64014)
+#define _DPA_AUX_CH_DATA2      (DISPLAY_MMIO_BASE(dev_priv) + 0x64018)
+#define _DPA_AUX_CH_DATA3      (DISPLAY_MMIO_BASE(dev_priv) + 0x6401c)
+#define _DPA_AUX_CH_DATA4      (DISPLAY_MMIO_BASE(dev_priv) + 0x64020)
+#define _DPA_AUX_CH_DATA5      (DISPLAY_MMIO_BASE(dev_priv) + 0x64024)
+
+#define _DPB_AUX_CH_CTL                (DISPLAY_MMIO_BASE(dev_priv) + 0x64110)
+#define _DPB_AUX_CH_DATA1      (DISPLAY_MMIO_BASE(dev_priv) + 0x64114)
+#define _DPB_AUX_CH_DATA2      (DISPLAY_MMIO_BASE(dev_priv) + 0x64118)
+#define _DPB_AUX_CH_DATA3      (DISPLAY_MMIO_BASE(dev_priv) + 0x6411c)
+#define _DPB_AUX_CH_DATA4      (DISPLAY_MMIO_BASE(dev_priv) + 0x64120)
+#define _DPB_AUX_CH_DATA5      (DISPLAY_MMIO_BASE(dev_priv) + 0x64124)
+
+#define _DPC_AUX_CH_CTL                (DISPLAY_MMIO_BASE(dev_priv) + 0x64210)
+#define _DPC_AUX_CH_DATA1      (DISPLAY_MMIO_BASE(dev_priv) + 0x64214)
+#define _DPC_AUX_CH_DATA2      (DISPLAY_MMIO_BASE(dev_priv) + 0x64218)
+#define _DPC_AUX_CH_DATA3      (DISPLAY_MMIO_BASE(dev_priv) + 0x6421c)
+#define _DPC_AUX_CH_DATA4      (DISPLAY_MMIO_BASE(dev_priv) + 0x64220)
+#define _DPC_AUX_CH_DATA5      (DISPLAY_MMIO_BASE(dev_priv) + 0x64224)
+
+#define _DPD_AUX_CH_CTL                (DISPLAY_MMIO_BASE(dev_priv) + 0x64310)
+#define _DPD_AUX_CH_DATA1      (DISPLAY_MMIO_BASE(dev_priv) + 0x64314)
+#define _DPD_AUX_CH_DATA2      (DISPLAY_MMIO_BASE(dev_priv) + 0x64318)
+#define _DPD_AUX_CH_DATA3      (DISPLAY_MMIO_BASE(dev_priv) + 0x6431c)
+#define _DPD_AUX_CH_DATA4      (DISPLAY_MMIO_BASE(dev_priv) + 0x64320)
+#define _DPD_AUX_CH_DATA5      (DISPLAY_MMIO_BASE(dev_priv) + 0x64324)
+
+#define _DPE_AUX_CH_CTL                (DISPLAY_MMIO_BASE(dev_priv) + 0x64410)
+#define _DPE_AUX_CH_DATA1      (DISPLAY_MMIO_BASE(dev_priv) + 0x64414)
+#define _DPE_AUX_CH_DATA2      (DISPLAY_MMIO_BASE(dev_priv) + 0x64418)
+#define _DPE_AUX_CH_DATA3      (DISPLAY_MMIO_BASE(dev_priv) + 0x6441c)
+#define _DPE_AUX_CH_DATA4      (DISPLAY_MMIO_BASE(dev_priv) + 0x64420)
+#define _DPE_AUX_CH_DATA5      (DISPLAY_MMIO_BASE(dev_priv) + 0x64424)
+
+#define _DPF_AUX_CH_CTL                (DISPLAY_MMIO_BASE(dev_priv) + 0x64510)
+#define _DPF_AUX_CH_DATA1      (DISPLAY_MMIO_BASE(dev_priv) + 0x64514)
+#define _DPF_AUX_CH_DATA2      (DISPLAY_MMIO_BASE(dev_priv) + 0x64518)
+#define _DPF_AUX_CH_DATA3      (DISPLAY_MMIO_BASE(dev_priv) + 0x6451c)
+#define _DPF_AUX_CH_DATA4      (DISPLAY_MMIO_BASE(dev_priv) + 0x64520)
+#define _DPF_AUX_CH_DATA5      (DISPLAY_MMIO_BASE(dev_priv) + 0x64524)
 
 #define DP_AUX_CH_CTL(aux_ch)  _MMIO_PORT(aux_ch, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL)
 #define DP_AUX_CH_DATA(aux_ch, i)      _MMIO(_PORT(aux_ch, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
@@ -5677,6 +5713,12 @@ enum {
 #define   PIPEMISC_DITHER_TYPE_SP      (0 << 2)
 #define PIPEMISC(pipe)                 _MMIO_PIPE2(pipe, _PIPE_MISC_A)
 
+/* Skylake+ pipe bottom (background) color */
+#define _SKL_BOTTOM_COLOR_A            0x70034
+#define   SKL_BOTTOM_COLOR_GAMMA_ENABLE        (1 << 31)
+#define   SKL_BOTTOM_COLOR_CSC_ENABLE  (1 << 30)
+#define SKL_BOTTOM_COLOR(pipe)         _MMIO_PIPE2(pipe, _SKL_BOTTOM_COLOR_A)
+
 #define VLV_DPFLIPSTAT                         _MMIO(VLV_DISPLAY_BASE + 0x70028)
 #define   PIPEB_LINE_COMPARE_INT_EN            (1 << 29)
 #define   PIPEB_HLINE_INT_EN                   (1 << 28)
@@ -5728,7 +5770,7 @@ enum {
 #define   DPINVGTT_STATUS_MASK                 0xff
 #define   DPINVGTT_STATUS_MASK_CHV             0xfff
 
-#define DSPARB                 _MMIO(dev_priv->info.display_mmio_offset + 0x70030)
+#define DSPARB                 _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70030)
 #define   DSPARB_CSTART_MASK   (0x7f << 7)
 #define   DSPARB_CSTART_SHIFT  7
 #define   DSPARB_BSTART_MASK   (0x7f)
@@ -5763,7 +5805,7 @@ enum {
 #define   DSPARB_SPRITEF_MASK_VLV      (0xff << 8)
 
 /* pnv/gen4/g4x/vlv/chv */
-#define DSPFW1         _MMIO(dev_priv->info.display_mmio_offset + 0x70034)
+#define DSPFW1         _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70034)
 #define   DSPFW_SR_SHIFT               23
 #define   DSPFW_SR_MASK                        (0x1ff << 23)
 #define   DSPFW_CURSORB_SHIFT          16
@@ -5774,7 +5816,7 @@ enum {
 #define   DSPFW_PLANEA_SHIFT           0
 #define   DSPFW_PLANEA_MASK            (0x7f << 0)
 #define   DSPFW_PLANEA_MASK_VLV                (0xff << 0) /* vlv/chv */
-#define DSPFW2         _MMIO(dev_priv->info.display_mmio_offset + 0x70038)
+#define DSPFW2         _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70038)
 #define   DSPFW_FBC_SR_EN              (1 << 31)         /* g4x */
 #define   DSPFW_FBC_SR_SHIFT           28
 #define   DSPFW_FBC_SR_MASK            (0x7 << 28) /* g4x */
@@ -5790,7 +5832,7 @@ enum {
 #define   DSPFW_SPRITEA_SHIFT          0
 #define   DSPFW_SPRITEA_MASK           (0x7f << 0) /* g4x */
 #define   DSPFW_SPRITEA_MASK_VLV       (0xff << 0) /* vlv/chv */
-#define DSPFW3         _MMIO(dev_priv->info.display_mmio_offset + 0x7003c)
+#define DSPFW3         _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x7003c)
 #define   DSPFW_HPLL_SR_EN             (1 << 31)
 #define   PINEVIEW_SELF_REFRESH_EN     (1 << 30)
 #define   DSPFW_CURSOR_SR_SHIFT                24
@@ -5958,7 +6000,7 @@ enum {
 #define   PLANE_WM_EN          (1 << 31)
 #define   PLANE_WM_LINES_SHIFT 14
 #define   PLANE_WM_LINES_MASK  0x1f
-#define   PLANE_WM_BLOCKS_MASK 0x3ff
+#define   PLANE_WM_BLOCKS_MASK 0x7ff /* skl+: 10 bits, icl+: 11 bits */
 
 #define _CUR_WM_0(pipe) _PIPE(pipe, _CUR_WM_A_0, _CUR_WM_B_0)
 #define CUR_WM(pipe, level) _MMIO(_CUR_WM_0(pipe) + ((4) * (level)))
@@ -6206,35 +6248,35 @@ enum {
  * [10:1f] all
  * [30:32] all
  */
-#define SWF0(i)        _MMIO(dev_priv->info.display_mmio_offset + 0x70410 + (i) * 4)
-#define SWF1(i)        _MMIO(dev_priv->info.display_mmio_offset + 0x71410 + (i) * 4)
-#define SWF3(i)        _MMIO(dev_priv->info.display_mmio_offset + 0x72414 + (i) * 4)
+#define SWF0(i)        _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70410 + (i) * 4)
+#define SWF1(i)        _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x71410 + (i) * 4)
+#define SWF3(i)        _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x72414 + (i) * 4)
 #define SWF_ILK(i)     _MMIO(0x4F000 + (i) * 4)
 
 /* Pipe B */
-#define _PIPEBDSL              (dev_priv->info.display_mmio_offset + 0x71000)
-#define _PIPEBCONF             (dev_priv->info.display_mmio_offset + 0x71008)
-#define _PIPEBSTAT             (dev_priv->info.display_mmio_offset + 0x71024)
+#define _PIPEBDSL              (DISPLAY_MMIO_BASE(dev_priv) + 0x71000)
+#define _PIPEBCONF             (DISPLAY_MMIO_BASE(dev_priv) + 0x71008)
+#define _PIPEBSTAT             (DISPLAY_MMIO_BASE(dev_priv) + 0x71024)
 #define _PIPEBFRAMEHIGH                0x71040
 #define _PIPEBFRAMEPIXEL       0x71044
-#define _PIPEB_FRMCOUNT_G4X    (dev_priv->info.display_mmio_offset + 0x71040)
-#define _PIPEB_FLIPCOUNT_G4X   (dev_priv->info.display_mmio_offset + 0x71044)
+#define _PIPEB_FRMCOUNT_G4X    (DISPLAY_MMIO_BASE(dev_priv) + 0x71040)
+#define _PIPEB_FLIPCOUNT_G4X   (DISPLAY_MMIO_BASE(dev_priv) + 0x71044)
 
 
 /* Display B control */
-#define _DSPBCNTR              (dev_priv->info.display_mmio_offset + 0x71180)
+#define _DSPBCNTR              (DISPLAY_MMIO_BASE(dev_priv) + 0x71180)
 #define   DISPPLANE_ALPHA_TRANS_ENABLE         (1 << 15)
 #define   DISPPLANE_ALPHA_TRANS_DISABLE                0
 #define   DISPPLANE_SPRITE_ABOVE_DISPLAY       0
 #define   DISPPLANE_SPRITE_ABOVE_OVERLAY       (1)
-#define _DSPBADDR              (dev_priv->info.display_mmio_offset + 0x71184)
-#define _DSPBSTRIDE            (dev_priv->info.display_mmio_offset + 0x71188)
-#define _DSPBPOS               (dev_priv->info.display_mmio_offset + 0x7118C)
-#define _DSPBSIZE              (dev_priv->info.display_mmio_offset + 0x71190)
-#define _DSPBSURF              (dev_priv->info.display_mmio_offset + 0x7119C)
-#define _DSPBTILEOFF           (dev_priv->info.display_mmio_offset + 0x711A4)
-#define _DSPBOFFSET            (dev_priv->info.display_mmio_offset + 0x711A4)
-#define _DSPBSURFLIVE          (dev_priv->info.display_mmio_offset + 0x711AC)
+#define _DSPBADDR              (DISPLAY_MMIO_BASE(dev_priv) + 0x71184)
+#define _DSPBSTRIDE            (DISPLAY_MMIO_BASE(dev_priv) + 0x71188)
+#define _DSPBPOS               (DISPLAY_MMIO_BASE(dev_priv) + 0x7118C)
+#define _DSPBSIZE              (DISPLAY_MMIO_BASE(dev_priv) + 0x71190)
+#define _DSPBSURF              (DISPLAY_MMIO_BASE(dev_priv) + 0x7119C)
+#define _DSPBTILEOFF           (DISPLAY_MMIO_BASE(dev_priv) + 0x711A4)
+#define _DSPBOFFSET            (DISPLAY_MMIO_BASE(dev_priv) + 0x711A4)
+#define _DSPBSURFLIVE          (DISPLAY_MMIO_BASE(dev_priv) + 0x711AC)
 
 /* ICL DSI 0 and 1 */
 #define _PIPEDSI0CONF          0x7b008
@@ -6742,8 +6784,7 @@ enum {
 
 #define _PLANE_BUF_CFG_1_B                     0x7127c
 #define _PLANE_BUF_CFG_2_B                     0x7137c
-#define  SKL_DDB_ENTRY_MASK                    0x3FF
-#define  ICL_DDB_ENTRY_MASK                    0x7FF
+#define  DDB_ENTRY_MASK                                0x7FF /* skl+: 10 bits, icl+: 11 bits */
 #define  DDB_ENTRY_END_SHIFT                   16
 #define _PLANE_BUF_CFG_1(pipe) \
        _PIPE(pipe, _PLANE_BUF_CFG_1_A, _PLANE_BUF_CFG_1_B)
@@ -7576,6 +7617,7 @@ enum {
 #define _PIPEB_CHICKEN                 0x71038
 #define _PIPEC_CHICKEN                 0x72038
 #define  PER_PIXEL_ALPHA_BYPASS_EN     (1 << 7)
+#define  PM_FILL_MAINTAIN_DBUF_FULLNESS        (1 << 0)
 #define PIPE_CHICKEN(pipe)             _MMIO_PIPE(pipe, _PIPEA_CHICKEN,\
                                                   _PIPEB_CHICKEN)
 
@@ -8786,7 +8828,7 @@ enum {
 #define   GEN9_ENABLE_GPGPU_PREEMPTION (1 << 2)
 
 /* Audio */
-#define G4X_AUD_VID_DID                        _MMIO(dev_priv->info.display_mmio_offset + 0x62020)
+#define G4X_AUD_VID_DID                        _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x62020)
 #define   INTEL_AUDIO_DEVCL            0x808629FB
 #define   INTEL_AUDIO_DEVBLC           0x80862801
 #define   INTEL_AUDIO_DEVCTG           0x80862802
@@ -9521,7 +9563,7 @@ enum skl_power_gate {
 #define _MG_PLL3_ENABLE                0x46038
 #define _MG_PLL4_ENABLE                0x4603C
 /* Bits are the same as DPLL0_ENABLE */
-#define MG_PLL_ENABLE(port)    _MMIO_PORT((port) - PORT_C, _MG_PLL1_ENABLE, \
+#define MG_PLL_ENABLE(tc_port) _MMIO_PORT((tc_port), _MG_PLL1_ENABLE, \
                                           _MG_PLL2_ENABLE)
 
 #define _MG_REFCLKIN_CTL_PORT1                         0x16892C
@@ -9530,9 +9572,9 @@ enum skl_power_gate {
 #define _MG_REFCLKIN_CTL_PORT4                         0x16B92C
 #define   MG_REFCLKIN_CTL_OD_2_MUX(x)                  ((x) << 8)
 #define   MG_REFCLKIN_CTL_OD_2_MUX_MASK                        (0x7 << 8)
-#define MG_REFCLKIN_CTL(port) _MMIO_PORT((port) - PORT_C, \
-                                        _MG_REFCLKIN_CTL_PORT1, \
-                                        _MG_REFCLKIN_CTL_PORT2)
+#define MG_REFCLKIN_CTL(tc_port) _MMIO_PORT((tc_port), \
+                                           _MG_REFCLKIN_CTL_PORT1, \
+                                           _MG_REFCLKIN_CTL_PORT2)
 
 #define _MG_CLKTOP2_CORECLKCTL1_PORT1                  0x1688D8
 #define _MG_CLKTOP2_CORECLKCTL1_PORT2                  0x1698D8
@@ -9542,9 +9584,9 @@ enum skl_power_gate {
 #define   MG_CLKTOP2_CORECLKCTL1_B_DIVRATIO_MASK       (0xff << 16)
 #define   MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(x)         ((x) << 8)
 #define   MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK       (0xff << 8)
-#define MG_CLKTOP2_CORECLKCTL1(port) _MMIO_PORT((port) - PORT_C, \
-                                               _MG_CLKTOP2_CORECLKCTL1_PORT1, \
-                                               _MG_CLKTOP2_CORECLKCTL1_PORT2)
+#define MG_CLKTOP2_CORECLKCTL1(tc_port) _MMIO_PORT((tc_port), \
+                                                  _MG_CLKTOP2_CORECLKCTL1_PORT1, \
+                                                  _MG_CLKTOP2_CORECLKCTL1_PORT2)
 
 #define _MG_CLKTOP2_HSCLKCTL_PORT1                     0x1688D4
 #define _MG_CLKTOP2_HSCLKCTL_PORT2                     0x1698D4
@@ -9562,9 +9604,9 @@ enum skl_power_gate {
 #define   MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(x)           ((x) << 8)
 #define   MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT                8
 #define   MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK         (0xf << 8)
-#define MG_CLKTOP2_HSCLKCTL(port) _MMIO_PORT((port) - PORT_C, \
-                                            _MG_CLKTOP2_HSCLKCTL_PORT1, \
-                                            _MG_CLKTOP2_HSCLKCTL_PORT2)
+#define MG_CLKTOP2_HSCLKCTL(tc_port) _MMIO_PORT((tc_port), \
+                                               _MG_CLKTOP2_HSCLKCTL_PORT1, \
+                                               _MG_CLKTOP2_HSCLKCTL_PORT2)
 
 #define _MG_PLL_DIV0_PORT1                             0x168A00
 #define _MG_PLL_DIV0_PORT2                             0x169A00
@@ -9576,8 +9618,8 @@ enum skl_power_gate {
 #define   MG_PLL_DIV0_FBDIV_FRAC(x)                    ((x) << 8)
 #define   MG_PLL_DIV0_FBDIV_INT_MASK                   (0xff << 0)
 #define   MG_PLL_DIV0_FBDIV_INT(x)                     ((x) << 0)
-#define MG_PLL_DIV0(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_DIV0_PORT1, \
-                                    _MG_PLL_DIV0_PORT2)
+#define MG_PLL_DIV0(tc_port) _MMIO_PORT((tc_port), _MG_PLL_DIV0_PORT1, \
+                                       _MG_PLL_DIV0_PORT2)
 
 #define _MG_PLL_DIV1_PORT1                             0x168A04
 #define _MG_PLL_DIV1_PORT2                             0x169A04
@@ -9591,8 +9633,8 @@ enum skl_power_gate {
 #define   MG_PLL_DIV1_NDIVRATIO(x)                     ((x) << 4)
 #define   MG_PLL_DIV1_FBPREDIV_MASK                    (0xf << 0)
 #define   MG_PLL_DIV1_FBPREDIV(x)                      ((x) << 0)
-#define MG_PLL_DIV1(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_DIV1_PORT1, \
-                                    _MG_PLL_DIV1_PORT2)
+#define MG_PLL_DIV1(tc_port) _MMIO_PORT((tc_port), _MG_PLL_DIV1_PORT1, \
+                                       _MG_PLL_DIV1_PORT2)
 
 #define _MG_PLL_LF_PORT1                               0x168A08
 #define _MG_PLL_LF_PORT2                               0x169A08
@@ -9604,8 +9646,8 @@ enum skl_power_gate {
 #define   MG_PLL_LF_GAINCTRL(x)                                ((x) << 16)
 #define   MG_PLL_LF_INT_COEFF(x)                       ((x) << 8)
 #define   MG_PLL_LF_PROP_COEFF(x)                      ((x) << 0)
-#define MG_PLL_LF(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_LF_PORT1, \
-                                  _MG_PLL_LF_PORT2)
+#define MG_PLL_LF(tc_port) _MMIO_PORT((tc_port), _MG_PLL_LF_PORT1, \
+                                     _MG_PLL_LF_PORT2)
 
 #define _MG_PLL_FRAC_LOCK_PORT1                                0x168A0C
 #define _MG_PLL_FRAC_LOCK_PORT2                                0x169A0C
@@ -9617,9 +9659,9 @@ enum skl_power_gate {
 #define   MG_PLL_FRAC_LOCK_DCODITHEREN                 (1 << 10)
 #define   MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN              (1 << 8)
 #define   MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(x)             ((x) << 0)
-#define MG_PLL_FRAC_LOCK(port) _MMIO_PORT((port) - PORT_C, \
-                                         _MG_PLL_FRAC_LOCK_PORT1, \
-                                         _MG_PLL_FRAC_LOCK_PORT2)
+#define MG_PLL_FRAC_LOCK(tc_port) _MMIO_PORT((tc_port), \
+                                            _MG_PLL_FRAC_LOCK_PORT1, \
+                                            _MG_PLL_FRAC_LOCK_PORT2)
 
 #define _MG_PLL_SSC_PORT1                              0x168A10
 #define _MG_PLL_SSC_PORT2                              0x169A10
@@ -9631,8 +9673,8 @@ enum skl_power_gate {
 #define   MG_PLL_SSC_STEPNUM(x)                                ((x) << 10)
 #define   MG_PLL_SSC_FLLEN                             (1 << 9)
 #define   MG_PLL_SSC_STEPSIZE(x)                       ((x) << 0)
-#define MG_PLL_SSC(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_SSC_PORT1, \
-                                   _MG_PLL_SSC_PORT2)
+#define MG_PLL_SSC(tc_port) _MMIO_PORT((tc_port), _MG_PLL_SSC_PORT1, \
+                                      _MG_PLL_SSC_PORT2)
 
 #define _MG_PLL_BIAS_PORT1                             0x168A14
 #define _MG_PLL_BIAS_PORT2                             0x169A14
@@ -9651,8 +9693,8 @@ enum skl_power_gate {
 #define   MG_PLL_BIAS_VREF_RDAC_MASK                   (0x7 << 5)
 #define   MG_PLL_BIAS_IREFTRIM(x)                      ((x) << 0)
 #define   MG_PLL_BIAS_IREFTRIM_MASK                    (0x1f << 0)
-#define MG_PLL_BIAS(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_BIAS_PORT1, \
-                                    _MG_PLL_BIAS_PORT2)
+#define MG_PLL_BIAS(tc_port) _MMIO_PORT((tc_port), _MG_PLL_BIAS_PORT1, \
+                                       _MG_PLL_BIAS_PORT2)
 
 #define _MG_PLL_TDC_COLDST_BIAS_PORT1                  0x168A18
 #define _MG_PLL_TDC_COLDST_BIAS_PORT2                  0x169A18
@@ -9663,9 +9705,9 @@ enum skl_power_gate {
 #define   MG_PLL_TDC_COLDST_COLDSTART                  (1 << 16)
 #define   MG_PLL_TDC_TDCOVCCORR_EN                     (1 << 2)
 #define   MG_PLL_TDC_TDCSEL(x)                         ((x) << 0)
-#define MG_PLL_TDC_COLDST_BIAS(port) _MMIO_PORT((port) - PORT_C, \
-                                               _MG_PLL_TDC_COLDST_BIAS_PORT1, \
-                                               _MG_PLL_TDC_COLDST_BIAS_PORT2)
+#define MG_PLL_TDC_COLDST_BIAS(tc_port) _MMIO_PORT((tc_port), \
+                                                  _MG_PLL_TDC_COLDST_BIAS_PORT1, \
+                                                  _MG_PLL_TDC_COLDST_BIAS_PORT2)
 
 #define _CNL_DPLL0_CFGCR0              0x6C000
 #define _CNL_DPLL1_CFGCR0              0x6C080
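
These hunks renumber the Type-C MG PHY PLL registers from the global port enum (hence the recurring "(port) - PORT_C") to a zero-based tc_port index, so each call site drops the subtraction. For readers unfamiliar with the _MMIO_PORT idiom, the macro interpolates between two anchor offsets; below is a minimal userspace sketch of that arithmetic. mmio_port() is an illustrative stand-in, not the driver macro, and the 0x1000 per-port stride is inferred from the PORT1/PORT2 anchors above (consistent with _MG_REFCLKIN_CTL_PORT4 = _MG_REFCLKIN_CTL_PORT1 + 3 * 0x1000).

/* Illustrative only: mimics the a + index * (b - a) interpolation that
 * _MMIO_PORT is built on. Not the driver's macro. */
#include <stdint.h>
#include <stdio.h>

#define MG_CLKTOP2_CORECLKCTL1_PORT1 0x1688D8u
#define MG_CLKTOP2_CORECLKCTL1_PORT2 0x1698D8u

static uint32_t mmio_port(unsigned int tc_port, uint32_t a, uint32_t b)
{
	return a + tc_port * (b - a);
}

int main(void)
{
	/* tc_port 0 -> 0x1688D8, tc_port 1 -> 0x1698D8, and so on. */
	for (unsigned int tc_port = 0; tc_port < 4; tc_port++)
		printf("MG_CLKTOP2_CORECLKCTL1(%u) = 0x%06X\n", tc_port,
		       (unsigned int)mmio_port(tc_port,
					       MG_CLKTOP2_CORECLKCTL1_PORT1,
					       MG_CLKTOP2_CORECLKCTL1_PORT2));
	return 0;
}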
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index cefefc11d922636418294760681b8ade6a310a1d..c2a5c48c7541d6d1bb230933748b210ff036bd78 100644 (file)
@@ -29,6 +29,8 @@
 #include <linux/sched/signal.h>
 
 #include "i915_drv.h"
+#include "i915_active.h"
+#include "i915_reset.h"
 
 static const char *i915_fence_get_driver_name(struct dma_fence *fence)
 {
@@ -59,7 +61,7 @@ static bool i915_fence_signaled(struct dma_fence *fence)
 
 static bool i915_fence_enable_signaling(struct dma_fence *fence)
 {
-       return intel_engine_enable_signaling(to_request(fence), true);
+       return i915_request_enable_breadcrumb(to_request(fence));
 }
 
 static signed long i915_fence_wait(struct dma_fence *fence,
@@ -111,99 +113,10 @@ i915_request_remove_from_client(struct i915_request *request)
        spin_unlock(&file_priv->mm.lock);
 }
 
-static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
+static void reserve_gt(struct drm_i915_private *i915)
 {
-       struct intel_engine_cs *engine;
-       struct i915_timeline *timeline;
-       enum intel_engine_id id;
-       int ret;
-
-       /* Carefully retire all requests without writing to the rings */
-       ret = i915_gem_wait_for_idle(i915,
-                                    I915_WAIT_INTERRUPTIBLE |
-                                    I915_WAIT_LOCKED,
-                                    MAX_SCHEDULE_TIMEOUT);
-       if (ret)
-               return ret;
-
-       GEM_BUG_ON(i915->gt.active_requests);
-
-       /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
-       for_each_engine(engine, i915, id) {
-               GEM_TRACE("%s seqno %d (current %d) -> %d\n",
-                         engine->name,
-                         engine->timeline.seqno,
-                         intel_engine_get_seqno(engine),
-                         seqno);
-
-               if (seqno == engine->timeline.seqno)
-                       continue;
-
-               kthread_park(engine->breadcrumbs.signaler);
-
-               if (!i915_seqno_passed(seqno, engine->timeline.seqno)) {
-                       /* Flush any waiters before we reuse the seqno */
-                       intel_engine_disarm_breadcrumbs(engine);
-                       intel_engine_init_hangcheck(engine);
-                       GEM_BUG_ON(!list_empty(&engine->breadcrumbs.signals));
-               }
-
-               /* Check we are idle before we fiddle with hw state! */
-               GEM_BUG_ON(!intel_engine_is_idle(engine));
-               GEM_BUG_ON(i915_gem_active_isset(&engine->timeline.last_request));
-
-               /* Finally reset hw state */
-               intel_engine_init_global_seqno(engine, seqno);
-               engine->timeline.seqno = seqno;
-
-               kthread_unpark(engine->breadcrumbs.signaler);
-       }
-
-       list_for_each_entry(timeline, &i915->gt.timelines, link)
-               memset(timeline->global_sync, 0, sizeof(timeline->global_sync));
-
-       i915->gt.request_serial = seqno;
-
-       return 0;
-}
-
-int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
-{
-       struct drm_i915_private *i915 = to_i915(dev);
-
-       lockdep_assert_held(&i915->drm.struct_mutex);
-
-       if (seqno == 0)
-               return -EINVAL;
-
-       /* HWS page needs to be set less than what we will inject to ring */
-       return reset_all_global_seqno(i915, seqno - 1);
-}
-
-static int reserve_gt(struct drm_i915_private *i915)
-{
-       int ret;
-
-       /*
-        * Reservation is fine until we may need to wrap around
-        *
-        * By incrementing the serial for every request, we know that no
-        * individual engine may exceed that serial (as each is reset to 0
-        * on any wrap). This protects even the most pessimistic of migrations
-        * of every request from all engines onto just one.
-        */
-       while (unlikely(++i915->gt.request_serial == 0)) {
-               ret = reset_all_global_seqno(i915, 0);
-               if (ret) {
-                       i915->gt.request_serial--;
-                       return ret;
-               }
-       }
-
        if (!i915->gt.active_requests++)
                i915_gem_unpark(i915);
-
-       return 0;
 }
 
 static void unreserve_gt(struct drm_i915_private *i915)
@@ -213,12 +126,6 @@ static void unreserve_gt(struct drm_i915_private *i915)
                i915_gem_park(i915);
 }
 
-void i915_gem_retire_noop(struct i915_gem_active *active,
-                         struct i915_request *request)
-{
-       /* Space left intentionally blank */
-}
-
 static void advance_ring(struct i915_request *request)
 {
        struct intel_ring *ring = request->ring;
@@ -270,10 +177,11 @@ static void free_capture_list(struct i915_request *request)
 static void __retire_engine_request(struct intel_engine_cs *engine,
                                    struct i915_request *rq)
 {
-       GEM_TRACE("%s(%s) fence %llx:%lld, global=%d, current %d\n",
+       GEM_TRACE("%s(%s) fence %llx:%lld, global=%d, current %d:%d\n",
                  __func__, engine->name,
                  rq->fence.context, rq->fence.seqno,
                  rq->global_seqno,
+                 hwsp_seqno(rq),
                  intel_engine_get_seqno(engine));
 
        GEM_BUG_ON(!i915_request_completed(rq));
@@ -286,10 +194,11 @@ static void __retire_engine_request(struct intel_engine_cs *engine,
        spin_unlock(&engine->timeline.lock);
 
        spin_lock(&rq->lock);
-       if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+       i915_request_mark_complete(rq);
+       if (!i915_request_signaled(rq))
                dma_fence_signal_locked(&rq->fence);
        if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
-               intel_engine_cancel_signaling(rq);
+               i915_request_cancel_breadcrumb(rq);
        if (rq->waitboost) {
                GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
                atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
@@ -330,12 +239,13 @@ static void __retire_engine_upto(struct intel_engine_cs *engine,
 
 static void i915_request_retire(struct i915_request *request)
 {
-       struct i915_gem_active *active, *next;
+       struct i915_active_request *active, *next;
 
-       GEM_TRACE("%s fence %llx:%lld, global=%d, current %d\n",
+       GEM_TRACE("%s fence %llx:%lld, global=%d, current %d:%d\n",
                  request->engine->name,
                  request->fence.context, request->fence.seqno,
                  request->global_seqno,
+                 hwsp_seqno(request),
                  intel_engine_get_seqno(request->engine));
 
        lockdep_assert_held(&request->i915->drm.struct_mutex);
@@ -363,10 +273,10 @@ static void i915_request_retire(struct i915_request *request)
                 * we may spend an inordinate amount of time simply handling
                 * the retirement of requests and processing their callbacks.
                 * Of which, this loop itself is particularly hot due to the
-                * cache misses when jumping around the list of i915_gem_active.
-                * So we try to keep this loop as streamlined as possible and
-                * also prefetch the next i915_gem_active to try and hide
-                * the likely cache miss.
+                * cache misses when jumping around the list of
+                * i915_active_request.  So we try to keep this loop as
+                * streamlined as possible and also prefetch the next
+                * i915_active_request to try and hide the likely cache miss.
                 */
                prefetchw(next);
 
@@ -395,10 +305,11 @@ void i915_request_retire_upto(struct i915_request *rq)
        struct intel_ring *ring = rq->ring;
        struct i915_request *tmp;
 
-       GEM_TRACE("%s fence %llx:%lld, global=%d, current %d\n",
+       GEM_TRACE("%s fence %llx:%lld, global=%d, current %d:%d\n",
                  rq->engine->name,
                  rq->fence.context, rq->fence.seqno,
                  rq->global_seqno,
+                 hwsp_seqno(rq),
                  intel_engine_get_seqno(rq->engine));
 
        lockdep_assert_held(&rq->i915->drm.struct_mutex);
@@ -417,7 +328,7 @@ void i915_request_retire_upto(struct i915_request *rq)
 
 static u32 timeline_get_seqno(struct i915_timeline *tl)
 {
-       return ++tl->seqno;
+       return tl->seqno += 1 + tl->has_initial_breadcrumb;
 }
 
 static void move_to_timeline(struct i915_request *request,
@@ -431,15 +342,23 @@ static void move_to_timeline(struct i915_request *request,
        spin_unlock(&request->timeline->lock);
 }
 
+static u32 next_global_seqno(struct i915_timeline *tl)
+{
+       if (!++tl->seqno)
+               ++tl->seqno;
+       return tl->seqno;
+}
+
 void __i915_request_submit(struct i915_request *request)
 {
        struct intel_engine_cs *engine = request->engine;
        u32 seqno;
 
-       GEM_TRACE("%s fence %llx:%lld -> global=%d, current %d\n",
+       GEM_TRACE("%s fence %llx:%lld -> global=%d, current %d:%d\n",
                  engine->name,
                  request->fence.context, request->fence.seqno,
                  engine->timeline.seqno + 1,
+                 hwsp_seqno(request),
                  intel_engine_get_seqno(engine));
 
        GEM_BUG_ON(!irqs_disabled());
@@ -447,26 +366,27 @@ void __i915_request_submit(struct i915_request *request)
 
        GEM_BUG_ON(request->global_seqno);
 
-       seqno = timeline_get_seqno(&engine->timeline);
+       seqno = next_global_seqno(&engine->timeline);
        GEM_BUG_ON(!seqno);
        GEM_BUG_ON(intel_engine_signaled(engine, seqno));
 
        /* We may be recursing from the signal callback of another i915 fence */
        spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
+       GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
+       set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
        request->global_seqno = seqno;
-       if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
-               intel_engine_enable_signaling(request, false);
+       if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
+           !i915_request_enable_breadcrumb(request))
+               intel_engine_queue_breadcrumbs(engine);
        spin_unlock(&request->lock);
 
-       engine->emit_breadcrumb(request,
-                               request->ring->vaddr + request->postfix);
+       engine->emit_fini_breadcrumb(request,
+                                    request->ring->vaddr + request->postfix);
 
        /* Transfer from per-context onto the global per-engine timeline */
        move_to_timeline(request, &engine->timeline);
 
        trace_i915_request_execute(request);
-
-       wake_up_all(&request->execute);
 }
 
 void i915_request_submit(struct i915_request *request)
@@ -486,10 +406,11 @@ void __i915_request_unsubmit(struct i915_request *request)
 {
        struct intel_engine_cs *engine = request->engine;
 
-       GEM_TRACE("%s fence %llx:%lld <- global=%d, current %d\n",
+       GEM_TRACE("%s fence %llx:%lld <- global=%d, current %d:%d\n",
                  engine->name,
                  request->fence.context, request->fence.seqno,
                  request->global_seqno,
+                 hwsp_seqno(request),
                  intel_engine_get_seqno(engine));
 
        GEM_BUG_ON(!irqs_disabled());
@@ -508,7 +429,9 @@ void __i915_request_unsubmit(struct i915_request *request)
        spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
        request->global_seqno = 0;
        if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
-               intel_engine_cancel_signaling(request);
+               i915_request_cancel_breadcrumb(request);
+       GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
+       clear_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
        spin_unlock(&request->lock);
 
        /* Transfer back from the global per-engine timeline to per-context */
@@ -566,6 +489,43 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
        return NOTIFY_DONE;
 }
 
+static void ring_retire_requests(struct intel_ring *ring)
+{
+       struct i915_request *rq, *rn;
+
+       list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link) {
+               if (!i915_request_completed(rq))
+                       break;
+
+               i915_request_retire(rq);
+       }
+}
+
+static noinline struct i915_request *
+i915_request_alloc_slow(struct intel_context *ce)
+{
+       struct intel_ring *ring = ce->ring;
+       struct i915_request *rq;
+
+       if (list_empty(&ring->request_list))
+               goto out;
+
+       /* Ratelimit ourselves to prevent oom from malicious clients */
+       rq = list_last_entry(&ring->request_list, typeof(*rq), ring_link);
+       cond_synchronize_rcu(rq->rcustate);
+
+       /* Retire our old requests in the hope that we free some */
+       ring_retire_requests(ring);
+
+out:
+       return kmem_cache_alloc(ce->gem_context->i915->requests, GFP_KERNEL);
+}
+
+static int add_timeline_barrier(struct i915_request *rq)
+{
+       return i915_request_await_active_request(rq, &rq->timeline->barrier);
+}
+
 /**
  * i915_request_alloc - allocate a request structure
  *
@@ -608,13 +568,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
        if (IS_ERR(ce))
                return ERR_CAST(ce);
 
-       ret = reserve_gt(i915);
-       if (ret)
-               goto err_unpin;
-
-       ret = intel_ring_wait_for_space(ce->ring, MIN_SPACE_FOR_ADD_REQUEST);
-       if (ret)
-               goto err_unreserve;
+       reserve_gt(i915);
 
        /* Move our oldest request to the slab-cache (if not in use!) */
        rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
@@ -628,7 +582,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
         * We use RCU to look up requests in flight. The lookups may
         * race with the request being allocated from the slab freelist.
        * That is, the request we are writing to here may be in the process
-        * of being read by __i915_gem_active_get_rcu(). As such,
+        * of being read by __i915_active_request_get_rcu(). As such,
         * we have to be very careful when overwriting the contents. During
        * the RCU lookup, we chase the request->engine pointer,
         * read the request->global_seqno and increment the reference count.
@@ -654,15 +608,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
        rq = kmem_cache_alloc(i915->requests,
                              GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
        if (unlikely(!rq)) {
-               i915_retire_requests(i915);
-
-               /* Ratelimit ourselves to prevent oom from malicious clients */
-               rq = i915_gem_active_raw(&ce->ring->timeline->last_request,
-                                        &i915->drm.struct_mutex);
-               if (rq)
-                       cond_synchronize_rcu(rq->rcustate);
-
-               rq = kmem_cache_alloc(i915->requests, GFP_KERNEL);
+               rq = i915_request_alloc_slow(ce);
                if (!rq) {
                        ret = -ENOMEM;
                        goto err_unreserve;
@@ -679,6 +625,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
        rq->ring = ce->ring;
        rq->timeline = ce->ring->timeline;
        GEM_BUG_ON(rq->timeline == &engine->timeline);
+       rq->hwsp_seqno = rq->timeline->hwsp_seqno;
 
        spin_lock_init(&rq->lock);
        dma_fence_init(&rq->fence,
@@ -689,13 +636,11 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 
        /* We bump the ref for the fence chain */
        i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
-       init_waitqueue_head(&rq->execute);
 
        i915_sched_node_init(&rq->sched);
 
        /* No zalloc, must clear what we need by hand */
        rq->global_seqno = 0;
-       rq->signaling.wait.seqno = 0;
        rq->file_priv = NULL;
        rq->batch = NULL;
        rq->capture_list = NULL;
@@ -707,9 +652,13 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
         * i915_request_add() call can't fail. Note that the reserve may need
         * to be redone if the request is not actually submitted straight
         * away, e.g. because a GPU scheduler has deferred it.
+        *
+        * Note that due to how we add reserved_space to intel_ring_begin()
+        * we need to double our reservation to ensure that if we need to wrap
+        * around inside i915_request_add() there is sufficient space at
+        * the beginning of the ring as well.
         */
-       rq->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
-       GEM_BUG_ON(rq->reserved_space < engine->emit_breadcrumb_sz);
+       rq->reserved_space = 2 * engine->emit_fini_breadcrumb_dw * sizeof(u32);
 
        /*
         * Record the position of the start of the request so that
@@ -719,8 +668,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
         */
        rq->head = rq->ring->emit;
 
-       /* Unconditionally invalidate GPU caches and TLBs. */
-       ret = engine->emit_flush(rq, EMIT_INVALIDATE);
+       ret = add_timeline_barrier(rq);
        if (ret)
                goto err_unwind;
 
@@ -748,7 +696,6 @@ err_unwind:
        kmem_cache_free(i915->requests, rq);
 err_unreserve:
        unreserve_gt(i915);
-err_unpin:
        intel_context_unpin(ce);
        return ERR_PTR(ret);
 }
@@ -776,34 +723,12 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
                ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
                                                       &from->submit,
                                                       I915_FENCE_GFP);
-               return ret < 0 ? ret : 0;
-       }
-
-       if (to->engine->semaphore.sync_to) {
-               u32 seqno;
-
-               GEM_BUG_ON(!from->engine->semaphore.signal);
-
-               seqno = i915_request_global_seqno(from);
-               if (!seqno)
-                       goto await_dma_fence;
-
-               if (seqno <= to->timeline->global_sync[from->engine->id])
-                       return 0;
-
-               trace_i915_gem_ring_sync_to(to, from);
-               ret = to->engine->semaphore.sync_to(to, from);
-               if (ret)
-                       return ret;
-
-               to->timeline->global_sync[from->engine->id] = seqno;
-               return 0;
+       } else {
+               ret = i915_sw_fence_await_dma_fence(&to->submit,
+                                                   &from->fence, 0,
+                                                   I915_FENCE_GFP);
        }
 
-await_dma_fence:
-       ret = i915_sw_fence_await_dma_fence(&to->submit,
-                                           &from->fence, 0,
-                                           I915_FENCE_GFP);
        return ret < 0 ? ret : 0;
 }
 
@@ -979,8 +904,8 @@ void i915_request_add(struct i915_request *request)
         * should already have been reserved in the ring buffer. Let the ring
         * know that it is time to use that space up.
         */
+       GEM_BUG_ON(request->reserved_space > request->ring->space);
        request->reserved_space = 0;
-       engine->emit_flush(request, EMIT_FLUSH);
 
        /*
         * Record the position of the start of the breadcrumb so that
@@ -988,7 +913,7 @@ void i915_request_add(struct i915_request *request)
         * GPU processing the request, we never over-estimate the
         * position of the ring's HEAD.
         */
-       cs = intel_ring_begin(request, engine->emit_breadcrumb_sz);
+       cs = intel_ring_begin(request, engine->emit_fini_breadcrumb_dw);
        GEM_BUG_ON(IS_ERR(cs));
        request->postfix = intel_ring_offset(request, cs);
 
@@ -999,8 +924,8 @@ void i915_request_add(struct i915_request *request)
         * see a more recent value in the hws than we are tracking.
         */
 
-       prev = i915_gem_active_raw(&timeline->last_request,
-                                  &request->i915->drm.struct_mutex);
+       prev = i915_active_request_raw(&timeline->last_request,
+                                      &request->i915->drm.struct_mutex);
        if (prev && !i915_request_completed(prev)) {
                i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
                                             &request->submitq);
@@ -1016,7 +941,7 @@ void i915_request_add(struct i915_request *request)
        spin_unlock_irq(&timeline->lock);
 
        GEM_BUG_ON(timeline->seqno != request->fence.seqno);
-       i915_gem_active_set(&timeline->last_request, request);
+       __i915_active_request_set(&timeline->last_request, request);
 
        list_add_tail(&request->ring_link, &ring->request_list);
        if (list_is_first(&request->ring_link, &ring->request_list)) {
@@ -1047,7 +972,7 @@ void i915_request_add(struct i915_request *request)
                 * Allow interactive/synchronous clients to jump ahead of
                 * the bulk clients. (FQ_CODEL)
                 */
-               if (!prev || i915_request_completed(prev))
+               if (list_empty(&request->sched.signalers_list))
                        attr.priority |= I915_PRIORITY_NEWCLIENT;
 
                engine->schedule(request, &attr);
@@ -1110,13 +1035,10 @@ static bool busywait_stop(unsigned long timeout, unsigned int cpu)
        return this_cpu != cpu;
 }
 
-static bool __i915_spin_request(const struct i915_request *rq,
-                               u32 seqno, int state, unsigned long timeout_us)
+static bool __i915_spin_request(const struct i915_request * const rq,
+                               int state, unsigned long timeout_us)
 {
-       struct intel_engine_cs *engine = rq->engine;
-       unsigned int irq, cpu;
-
-       GEM_BUG_ON(!seqno);
+       unsigned int cpu;
 
        /*
         * Only wait for the request if we know it is likely to complete.
@@ -1124,12 +1046,12 @@ static bool __i915_spin_request(const struct i915_request *rq,
         * We don't track the timestamps around requests, nor the average
         * request length, so we do not have a good indicator that this
         * request will complete within the timeout. What we do know is the
-        * order in which requests are executed by the engine and so we can
-        * tell if the request has started. If the request hasn't started yet,
-        * it is a fair assumption that it will not complete within our
-        * relatively short timeout.
+        * order in which requests are executed by the context and so we can
+        * tell if the request has been started. If the request is not even
+        * running yet, it is a fair assumption that it will not complete
+        * within our relatively short timeout.
         */
-       if (!intel_engine_has_started(engine, seqno))
+       if (!i915_request_is_running(rq))
                return false;
 
        /*
@@ -1143,20 +1065,10 @@ static bool __i915_spin_request(const struct i915_request *rq,
         * takes to sleep on a request, on the order of a microsecond.
         */
 
-       irq = READ_ONCE(engine->breadcrumbs.irq_count);
        timeout_us += local_clock_us(&cpu);
        do {
-               if (intel_engine_has_completed(engine, seqno))
-                       return seqno == i915_request_global_seqno(rq);
-
-               /*
-                * Seqno are meant to be ordered *before* the interrupt. If
-                * we see an interrupt without a corresponding seqno advance,
-                * assume we won't see one in the near future but require
-                * the engine->seqno_barrier() to fixup coherency.
-                */
-               if (READ_ONCE(engine->breadcrumbs.irq_count) != irq)
-                       break;
+               if (i915_request_completed(rq))
+                       return true;
 
                if (signal_pending_state(state, current))
                        break;
@@ -1170,16 +1082,16 @@ static bool __i915_spin_request(const struct i915_request *rq,
        return false;
 }
 
-static bool __i915_wait_request_check_and_reset(struct i915_request *request)
-{
-       struct i915_gpu_error *error = &request->i915->gpu_error;
+struct request_wait {
+       struct dma_fence_cb cb;
+       struct task_struct *tsk;
+};
 
-       if (likely(!i915_reset_handoff(error)))
-               return false;
+static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+       struct request_wait *wait = container_of(cb, typeof(*wait), cb);
 
-       __set_current_state(TASK_RUNNING);
-       i915_reset(request->i915, error->stalled_mask, error->reason);
-       return true;
+       wake_up_process(wait->tsk);
 }
 
 /**
@@ -1207,17 +1119,9 @@ long i915_request_wait(struct i915_request *rq,
 {
        const int state = flags & I915_WAIT_INTERRUPTIBLE ?
                TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
-       wait_queue_head_t *errq = &rq->i915->gpu_error.wait_queue;
-       DEFINE_WAIT_FUNC(reset, default_wake_function);
-       DEFINE_WAIT_FUNC(exec, default_wake_function);
-       struct intel_wait wait;
+       struct request_wait wait;
 
        might_sleep();
-#if IS_ENABLED(CONFIG_LOCKDEP)
-       GEM_BUG_ON(debug_locks &&
-                  !!lockdep_is_held(&rq->i915->drm.struct_mutex) !=
-                  !!(flags & I915_WAIT_LOCKED));
-#endif
        GEM_BUG_ON(timeout < 0);
 
        if (i915_request_completed(rq))
@@ -1228,57 +1132,23 @@ long i915_request_wait(struct i915_request *rq,
 
        trace_i915_request_wait_begin(rq, flags);
 
-       add_wait_queue(&rq->execute, &exec);
-       if (flags & I915_WAIT_LOCKED)
-               add_wait_queue(errq, &reset);
+       /* Optimistic short spin before touching IRQs */
+       if (__i915_spin_request(rq, state, 5))
+               goto out;
 
-       intel_wait_init(&wait);
        if (flags & I915_WAIT_PRIORITY)
                i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
 
-restart:
-       do {
-               set_current_state(state);
-               if (intel_wait_update_request(&wait, rq))
-                       break;
-
-               if (flags & I915_WAIT_LOCKED &&
-                   __i915_wait_request_check_and_reset(rq))
-                       continue;
-
-               if (signal_pending_state(state, current)) {
-                       timeout = -ERESTARTSYS;
-                       goto complete;
-               }
-
-               if (!timeout) {
-                       timeout = -ETIME;
-                       goto complete;
-               }
-
-               timeout = io_schedule_timeout(timeout);
-       } while (1);
-
-       GEM_BUG_ON(!intel_wait_has_seqno(&wait));
-       GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
+       wait.tsk = current;
+       if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
+               goto out;
 
-       /* Optimistic short spin before touching IRQs */
-       if (__i915_spin_request(rq, wait.seqno, state, 5))
-               goto complete;
-
-       set_current_state(state);
-       if (intel_engine_add_wait(rq->engine, &wait))
-               /*
-                * In order to check that we haven't missed the interrupt
-                * as we enabled it, we need to kick ourselves to do a
-                * coherent check on the seqno before we sleep.
-                */
-               goto wakeup;
+       for (;;) {
+               set_current_state(state);
 
-       if (flags & I915_WAIT_LOCKED)
-               __i915_wait_request_check_and_reset(rq);
+               if (i915_request_completed(rq))
+                       break;
 
-       for (;;) {
                if (signal_pending_state(state, current)) {
                        timeout = -ERESTARTSYS;
                        break;
@@ -1290,70 +1160,14 @@ restart:
                }
 
                timeout = io_schedule_timeout(timeout);
-
-               if (intel_wait_complete(&wait) &&
-                   intel_wait_check_request(&wait, rq))
-                       break;
-
-               set_current_state(state);
-
-wakeup:
-               /*
-                * Carefully check if the request is complete, giving time
-                * for the seqno to be visible following the interrupt.
-                * We also have to check in case we are kicked by the GPU
-                * reset in order to drop the struct_mutex.
-                */
-               if (__i915_request_irq_complete(rq))
-                       break;
-
-               /*
-                * If the GPU is hung, and we hold the lock, reset the GPU
-                * and then check for completion. On a full reset, the engine's
-                * HW seqno will be advanced passed us and we are complete.
-                * If we do a partial reset, we have to wait for the GPU to
-                * resume and update the breadcrumb.
-                *
-                * If we don't hold the mutex, we can just wait for the worker
-                * to come along and update the breadcrumb (either directly
-                * itself, or indirectly by recovering the GPU).
-                */
-               if (flags & I915_WAIT_LOCKED &&
-                   __i915_wait_request_check_and_reset(rq))
-                       continue;
-
-               /* Only spin if we know the GPU is processing this request */
-               if (__i915_spin_request(rq, wait.seqno, state, 2))
-                       break;
-
-               if (!intel_wait_check_request(&wait, rq)) {
-                       intel_engine_remove_wait(rq->engine, &wait);
-                       goto restart;
-               }
        }
-
-       intel_engine_remove_wait(rq->engine, &wait);
-complete:
        __set_current_state(TASK_RUNNING);
-       if (flags & I915_WAIT_LOCKED)
-               remove_wait_queue(errq, &reset);
-       remove_wait_queue(&rq->execute, &exec);
-       trace_i915_request_wait_end(rq);
-
-       return timeout;
-}
 
-static void ring_retire_requests(struct intel_ring *ring)
-{
-       struct i915_request *request, *next;
+       dma_fence_remove_callback(&rq->fence, &wait.cb);
 
-       list_for_each_entry_safe(request, next,
-                                &ring->request_list, ring_link) {
-               if (!i915_request_completed(request))
-                       break;
-
-               i915_request_retire(request);
-       }
+out:
+       trace_i915_request_wait_end(rq);
+       return timeout;
 }
 
 void i915_retire_requests(struct drm_i915_private *i915)
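
The rewritten i915_request_wait() above replaces the engine breadcrumb wait queue (intel_wait, the restart/wakeup labels, and the GPU-reset handoff checks) with a plain dma_fence callback that wakes the sleeping task. The following is a condensed sketch of that pattern; wait_on_fence() is a hypothetical helper built only on the core dma-fence API, with the interruptible handling and the optimistic spin of the real function omitted.

#include <linux/dma-fence.h>
#include <linux/errno.h>
#include <linux/sched.h>

struct fence_wait {
	struct dma_fence_cb cb;
	struct task_struct *tsk;
};

/* Runs in the fence's signalling context; just kick the sleeper. */
static void fence_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	wake_up_process(container_of(cb, struct fence_wait, cb)->tsk);
}

static long wait_on_fence(struct dma_fence *fence, long timeout)
{
	struct fence_wait wait = { .tsk = current };

	/* A non-zero return means the fence has already signalled. */
	if (dma_fence_add_callback(fence, &wait.cb, fence_wait_wake))
		return timeout;

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (dma_fence_is_signaled(fence))
			break;

		if (!timeout) {
			timeout = -ETIME;
			break;
		}

		timeout = io_schedule_timeout(timeout);
	}
	__set_current_state(TASK_RUNNING);

	dma_fence_remove_callback(fence, &wait.cb);
	return timeout;
}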
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 90e9d170a0cd5e00645842e3955df058e4a8b338..40f3e8dcbdd51a2f02935092b52cd6c7f9348060 100644 (file)
@@ -30,7 +30,6 @@
 #include "i915_gem.h"
 #include "i915_scheduler.h"
 #include "i915_sw_fence.h"
-#include "i915_scheduler.h"
 
 #include <uapi/drm/i915_drm.h>
 
@@ -39,23 +38,34 @@ struct drm_i915_gem_object;
 struct i915_request;
 struct i915_timeline;
 
-struct intel_wait {
-       struct rb_node node;
-       struct task_struct *tsk;
-       struct i915_request *request;
-       u32 seqno;
-};
-
-struct intel_signal_node {
-       struct intel_wait wait;
-       struct list_head link;
-};
-
 struct i915_capture_list {
        struct i915_capture_list *next;
        struct i915_vma *vma;
 };
 
+enum {
+       /*
+        * I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.
+        *
+        * Set by __i915_request_submit() on handing over to HW, and cleared
+        * by __i915_request_unsubmit() if we preempt this request.
+        *
+        * Finally cleared for consistency on retiring the request, when
+        * we know the HW is no longer running this request.
+        *
+        * See i915_request_is_active()
+        */
+       I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,
+
+       /*
+        * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
+        *
+        * Internal bookkeeping used by the breadcrumb code to track when
+        * a request is on the various signal lists.
+        */
+       I915_FENCE_FLAG_SIGNAL,
+};
+
 /**
  * Request queue structure.
  *
@@ -98,7 +108,7 @@ struct i915_request {
        struct intel_context *hw_context;
        struct intel_ring *ring;
        struct i915_timeline *timeline;
-       struct intel_signal_node signaling;
+       struct list_head signal_link;
 
        /*
         * The rcu epoch of when this request was allocated. Used to judiciously
@@ -117,7 +127,6 @@ struct i915_request {
         */
        struct i915_sw_fence submit;
        wait_queue_entry_t submitq;
-       wait_queue_head_t execute;
 
        /*
         * A list of everyone we wait upon, and everyone who waits upon us.
@@ -131,6 +140,13 @@ struct i915_request {
        struct i915_sched_node sched;
        struct i915_dependency dep;
 
+       /*
+        * A convenience pointer to the current breadcrumb value stored in
+        * the HW status page (or our timeline's local equivalent). The full
+        * path would be rq->hw_context->ring->timeline->hwsp_seqno.
+        */
+       const u32 *hwsp_seqno;
+
        /**
         * GEM sequence number associated with this request on the
         * global execution timeline. It is zero when the request is not
@@ -249,7 +265,7 @@ i915_request_put(struct i915_request *rq)
  * that it has passed the global seqno and the global seqno is unchanged
  * after the read, it is indeed complete).
  */
-static u32
+static inline u32
 i915_request_global_seqno(const struct i915_request *request)
 {
        return READ_ONCE(request->global_seqno);
@@ -271,6 +287,10 @@ void i915_request_skip(struct i915_request *request, int error);
 void __i915_request_unsubmit(struct i915_request *request);
 void i915_request_unsubmit(struct i915_request *request);
 
+/* Note: part of the intel_breadcrumbs family */
+bool i915_request_enable_breadcrumb(struct i915_request *request);
+void i915_request_cancel_breadcrumb(struct i915_request *request);
+
 long i915_request_wait(struct i915_request *rq,
                       unsigned int flags,
                       long timeout)
@@ -281,441 +301,106 @@ long i915_request_wait(struct i915_request *rq,
 #define I915_WAIT_ALL          BIT(3) /* used by i915_gem_object_wait() */
 #define I915_WAIT_FOR_IDLE_BOOST BIT(4)
 
-static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
-                                           u32 seqno);
-static inline bool intel_engine_has_completed(struct intel_engine_cs *engine,
-                                             u32 seqno);
-
-/**
- * Returns true if seq1 is later than seq2.
- */
-static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
-{
-       return (s32)(seq1 - seq2) >= 0;
-}
-
-/**
- * i915_request_started - check if the request has begun being executed
- * @rq: the request
- *
- * Returns true if the request has been submitted to hardware, and the hardware
- * has advanced passed the end of the previous request and so should be either
- * currently processing the request (though it may be preempted and so
- * not necessarily the next request to complete) or have completed the request.
- */
-static inline bool i915_request_started(const struct i915_request *rq)
-{
-       u32 seqno;
-
-       seqno = i915_request_global_seqno(rq);
-       if (!seqno) /* not yet submitted to HW */
-               return false;
-
-       return intel_engine_has_started(rq->engine, seqno);
-}
-
-static inline bool
-__i915_request_completed(const struct i915_request *rq, u32 seqno)
+static inline bool i915_request_signaled(const struct i915_request *rq)
 {
-       GEM_BUG_ON(!seqno);
-       return intel_engine_has_completed(rq->engine, seqno) &&
-               seqno == i915_request_global_seqno(rq);
+       /* The request may live longer than its HWSP, so check flags first! */
+       return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags);
 }
 
-static inline bool i915_request_completed(const struct i915_request *rq)
+static inline bool i915_request_is_active(const struct i915_request *rq)
 {
-       u32 seqno;
-
-       seqno = i915_request_global_seqno(rq);
-       if (!seqno)
-               return false;
-
-       return __i915_request_completed(rq, seqno);
+       return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
 }
 
-void i915_retire_requests(struct drm_i915_private *i915);
-
-/*
- * We treat requests as fences. This is not be to confused with our
- * "fence registers" but pipeline synchronisation objects ala GL_ARB_sync.
- * We use the fences to synchronize access from the CPU with activity on the
- * GPU, for example, we should not rewrite an object's PTE whilst the GPU
- * is reading them. We also track fences at a higher level to provide
- * implicit synchronisation around GEM objects, e.g. set-domain will wait
- * for outstanding GPU rendering before marking the object ready for CPU
- * access, or a pageflip will wait until the GPU is complete before showing
- * the frame on the scanout.
- *
- * In order to use a fence, the object must track the fence it needs to
- * serialise with. For example, GEM objects want to track both read and
- * write access so that we can perform concurrent read operations between
- * the CPU and GPU engines, as well as waiting for all rendering to
- * complete, or waiting for the last GPU user of a "fence register". The
- * object then embeds a #i915_gem_active to track the most recent (in
- * retirement order) request relevant for the desired mode of access.
- * The #i915_gem_active is updated with i915_gem_active_set() to track the
- * most recent fence request, typically this is done as part of
- * i915_vma_move_to_active().
- *
- * When the #i915_gem_active completes (is retired), it will
- * signal its completion to the owner through a callback as well as mark
- * itself as idle (i915_gem_active.request == NULL). The owner
- * can then perform any action, such as delayed freeing of an active
- * resource including itself.
- */
-struct i915_gem_active;
-
-typedef void (*i915_gem_retire_fn)(struct i915_gem_active *,
-                                  struct i915_request *);
-
-struct i915_gem_active {
-       struct i915_request __rcu *request;
-       struct list_head link;
-       i915_gem_retire_fn retire;
-};
-
-void i915_gem_retire_noop(struct i915_gem_active *,
-                         struct i915_request *request);
-
 /**
- * init_request_active - prepares the activity tracker for use
- * @active - the active tracker
- * @func - a callback when then the tracker is retired (becomes idle),
- *         can be NULL
- *
- * init_request_active() prepares the embedded @active struct for use as
- * an activity tracker, that is for tracking the last known active request
- * associated with it. When the last request becomes idle, when it is retired
- * after completion, the optional callback @func is invoked.
- */
-static inline void
-init_request_active(struct i915_gem_active *active,
-                   i915_gem_retire_fn retire)
-{
-       RCU_INIT_POINTER(active->request, NULL);
-       INIT_LIST_HEAD(&active->link);
-       active->retire = retire ?: i915_gem_retire_noop;
-}
-
-/**
- * i915_gem_active_set - updates the tracker to watch the current request
- * @active - the active tracker
- * @request - the request to watch
- *
- * i915_gem_active_set() watches the given @request for completion. Whilst
- * that @request is busy, the @active reports busy. When that @request is
- * retired, the @active tracker is updated to report idle.
- */
-static inline void
-i915_gem_active_set(struct i915_gem_active *active,
-                   struct i915_request *request)
-{
-       list_move(&active->link, &request->active_list);
-       rcu_assign_pointer(active->request, request);
-}
-
-/**
- * i915_gem_active_set_retire_fn - updates the retirement callback
- * @active - the active tracker
- * @fn - the routine called when the request is retired
- * @mutex - struct_mutex used to guard retirements
- *
- * i915_gem_active_set_retire_fn() updates the function pointer that
- * is called when the final request associated with the @active tracker
- * is retired.
+ * Returns true if seq1 is later than seq2.
  */
-static inline void
-i915_gem_active_set_retire_fn(struct i915_gem_active *active,
-                             i915_gem_retire_fn fn,
-                             struct mutex *mutex)
+static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
 {
-       lockdep_assert_held(mutex);
-       active->retire = fn ?: i915_gem_retire_noop;
+       return (s32)(seq1 - seq2) >= 0;
 }
 
-static inline struct i915_request *
-__i915_gem_active_peek(const struct i915_gem_active *active)
+static inline u32 __hwsp_seqno(const struct i915_request *rq)
 {
-       /*
-        * Inside the error capture (running with the driver in an unknown
-        * state), we want to bend the rules slightly (a lot).
-        *
-        * Work is in progress to make it safer, in the meantime this keeps
-        * the known issue from spamming the logs.
-        */
-       return rcu_dereference_protected(active->request, 1);
+       return READ_ONCE(*rq->hwsp_seqno);
 }
 
 /**
- * i915_gem_active_raw - return the active request
- * @active - the active tracker
+ * hwsp_seqno - the current breadcrumb value in the HW status page
+ * @rq: the request, to chase the relevant HW status page
  *
- * i915_gem_active_raw() returns the current request being tracked, or NULL.
- * It does not obtain a reference on the request for the caller, so the caller
- * must hold struct_mutex.
- */
-static inline struct i915_request *
-i915_gem_active_raw(const struct i915_gem_active *active, struct mutex *mutex)
-{
-       return rcu_dereference_protected(active->request,
-                                        lockdep_is_held(mutex));
-}
-
-/**
- * i915_gem_active_peek - report the active request being monitored
- * @active - the active tracker
+ * The emphasis in naming here is that hwsp_seqno() is not a property of the
+ * request, but an indication of the current HW state (associated with this
+ * request). Its value will change as the GPU executes more requests.
  *
- * i915_gem_active_peek() returns the current request being tracked if
- * still active, or NULL. It does not obtain a reference on the request
- * for the caller, so the caller must hold struct_mutex.
+ * Returns the current breadcrumb value in the associated HW status page (or
+ * the local timeline's equivalent) for this request. The request itself
+ * has the associated breadcrumb value of rq->fence.seqno; when the HW
+ * status page has that breadcrumb or later, this request is complete.
  */
-static inline struct i915_request *
-i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
+static inline u32 hwsp_seqno(const struct i915_request *rq)
 {
-       struct i915_request *request;
+       u32 seqno;
 
-       request = i915_gem_active_raw(active, mutex);
-       if (!request || i915_request_completed(request))
-               return NULL;
+       rcu_read_lock(); /* the HWSP may be freed at runtime */
+       seqno = __hwsp_seqno(rq);
+       rcu_read_unlock();
 
-       return request;
+       return seqno;
 }
 
-/**
- * i915_gem_active_get - return a reference to the active request
- * @active - the active tracker
- *
- * i915_gem_active_get() returns a reference to the active request, or NULL
- * if the active tracker is idle. The caller must hold struct_mutex.
- */
-static inline struct i915_request *
-i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
+static inline bool __i915_request_has_started(const struct i915_request *rq)
 {
-       return i915_request_get(i915_gem_active_peek(active, mutex));
+       return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1);
 }
 
 /**
- * __i915_gem_active_get_rcu - return a reference to the active request
- * @active - the active tracker
- *
- * __i915_gem_active_get() returns a reference to the active request, or NULL
- * if the active tracker is idle. The caller must hold the RCU read lock, but
- * the returned pointer is safe to use outside of RCU.
- */
-static inline struct i915_request *
-__i915_gem_active_get_rcu(const struct i915_gem_active *active)
-{
-       /*
-        * Performing a lockless retrieval of the active request is super
-        * tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing
-        * slab of request objects will not be freed whilst we hold the
-        * RCU read lock. It does not guarantee that the request itself
-        * will not be freed and then *reused*. Viz,
-        *
-        * Thread A                     Thread B
-        *
-        * rq = active.request
-        *                              retire(rq) -> free(rq);
-        *                              (rq is now first on the slab freelist)
-        *                              active.request = NULL
-        *
-        *                              rq = new submission on a new object
-        * ref(rq)
-        *
-        * To prevent the request from being reused whilst the caller
-        * uses it, we take a reference like normal. Whilst acquiring
-        * the reference we check that it is not in a destroyed state
-        * (refcnt == 0). That prevents the request being reallocated
-        * whilst the caller holds on to it. To check that the request
-        * was not reallocated as we acquired the reference we have to
-        * check that our request remains the active request across
-        * the lookup, in the same manner as a seqlock. The visibility
-        * of the pointer versus the reference counting is controlled
-        * by using RCU barriers (rcu_dereference and rcu_assign_pointer).
-        *
-        * In the middle of all that, we inspect whether the request is
-        * complete. Retiring is lazy so the request may be completed long
-        * before the active tracker is updated. Querying whether the
-        * request is complete is far cheaper (as it involves no locked
-        * instructions setting cachelines to exclusive) than acquiring
-        * the reference, so we do it first. The RCU read lock ensures the
-        * pointer dereference is valid, but does not ensure that the
-        * seqno nor HWS is the right one! However, if the request was
-        * reallocated, that means the active tracker's request was complete.
-        * If the new request is also complete, then both are and we can
-        * just report the active tracker is idle. If the new request is
-        * incomplete, then we acquire a reference on it and check that
-        * it remained the active request.
-        *
-        * It is then imperative that we do not zero the request on
-        * reallocation, so that we can chase the dangling pointers!
-        * See i915_request_alloc().
-        */
-       do {
-               struct i915_request *request;
-
-               request = rcu_dereference(active->request);
-               if (!request || i915_request_completed(request))
-                       return NULL;
-
-               /*
-                * An especially silly compiler could decide to recompute the
-                * result of i915_request_completed, more specifically
-                * re-emit the load for request->fence.seqno. A race would catch
-                * a later seqno value, which could flip the result from true to
-                * false. Which means part of the instructions below might not
-                * be executed, while later on instructions are executed. Due to
-                * barriers within the refcounting the inconsistency can't reach
-                * past the call to i915_request_get_rcu, but not executing
-                * that while still executing i915_request_put() creates
-                * havoc enough.  Prevent this with a compiler barrier.
-                */
-               barrier();
-
-               request = i915_request_get_rcu(request);
-
-               /*
-                * What stops the following rcu_access_pointer() from occurring
-                * before the above i915_request_get_rcu()? If we were
-                * to read the value before pausing to get the reference to
-                * the request, we may not notice a change in the active
-                * tracker.
-                *
-                * The rcu_access_pointer() is a mere compiler barrier, which
-                * means both the CPU and compiler are free to perform the
-                * memory read without constraint. The compiler only has to
-                * ensure that any operations after the rcu_access_pointer()
-                * occur afterwards in program order. This means the read may
-                * be performed earlier by an out-of-order CPU, or adventurous
-                * compiler.
-                *
-                * The atomic operation at the heart of
-                * i915_request_get_rcu(), see dma_fence_get_rcu(), is
-                * atomic_inc_not_zero() which is only a full memory barrier
-                * when successful. That is, if i915_request_get_rcu()
-                * returns the request (and so with the reference counted
-                * incremented) then the following read for rcu_access_pointer()
-                * must occur after the atomic operation and so confirm
-                * that this request is the one currently being tracked.
-                *
-                * The corresponding write barrier is part of
-                * rcu_assign_pointer().
-                */
-               if (!request || request == rcu_access_pointer(active->request))
-                       return rcu_pointer_handoff(request);
-
-               i915_request_put(request);
-       } while (1);
-}
-
-/**
- * i915_gem_active_get_unlocked - return a reference to the active request
- * @active - the active tracker
- *
- * i915_gem_active_get_unlocked() returns a reference to the active request,
- * or NULL if the active tracker is idle. The reference is obtained under RCU,
- * so no locking is required by the caller.
+ * i915_request_started - check if the request has begun being executed
+ * @rq: the request
  *
- * The reference should be freed with i915_request_put().
+ * Returns true if the request has been submitted to hardware, and the hardware
+ * has advanced past the end of the previous request and so should be either
+ * currently processing the request (though it may be preempted and so
+ * not necessarily the next request to complete) or have completed the request.
  */
-static inline struct i915_request *
-i915_gem_active_get_unlocked(const struct i915_gem_active *active)
+static inline bool i915_request_started(const struct i915_request *rq)
 {
-       struct i915_request *request;
+       if (i915_request_signaled(rq))
+               return true;
 
-       rcu_read_lock();
-       request = __i915_gem_active_get_rcu(active);
-       rcu_read_unlock();
-
-       return request;
+       /* Remember: started but may have since been preempted! */
+       return __i915_request_has_started(rq);
 }
 
 /**
- * i915_gem_active_isset - report whether the active tracker is assigned
- * @active - the active tracker
+ * i915_request_is_running - check if the request may actually be executing
+ * @rq: the request
  *
- * i915_gem_active_isset() returns true if the active tracker is currently
- * assigned to a request. Due to the lazy retiring, that request may be idle
- * and this may report stale information.
+ * Returns true if the request is currently submitted to hardware and has
+ * passed its start point (i.e. the context is set up and not busywaiting).
+ * Note that it may no longer be running by the time the function returns!
  */
-static inline bool
-i915_gem_active_isset(const struct i915_gem_active *active)
+static inline bool i915_request_is_running(const struct i915_request *rq)
 {
-       return rcu_access_pointer(active->request);
+       if (!i915_request_is_active(rq))
+               return false;
+
+       return __i915_request_has_started(rq);
 }
 
-/**
- * i915_gem_active_wait - waits until the request is completed
- * @active - the active request on which to wait
- * @flags - how to wait
- * @timeout - how long to wait at most
- * @rps - userspace client to charge for a waitboost
- *
- * i915_gem_active_wait() waits until the request is completed before
- * returning, without requiring any locks to be held. Note that it does not
- * retire any requests before returning.
- *
- * This function relies on RCU in order to acquire the reference to the active
- * request without holding any locks. See __i915_gem_active_get_rcu() for the
- * glory details on how that is managed. Once the reference is acquired, we
- * can then wait upon the request, and afterwards release our reference,
- * free of any locking.
- *
- * This function wraps i915_request_wait(), see it for the full details on
- * the arguments.
- *
- * Returns 0 if successful, or a negative error code.
- */
-static inline int
-i915_gem_active_wait(const struct i915_gem_active *active, unsigned int flags)
+static inline bool i915_request_completed(const struct i915_request *rq)
 {
-       struct i915_request *request;
-       long ret = 0;
-
-       request = i915_gem_active_get_unlocked(active);
-       if (request) {
-               ret = i915_request_wait(request, flags, MAX_SCHEDULE_TIMEOUT);
-               i915_request_put(request);
-       }
+       if (i915_request_signaled(rq))
+               return true;
 
-       return ret < 0 ? ret : 0;
+       return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno);
 }
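
For illustration, a minimal standalone sketch of the wraparound-safe comparison idiom that i915_seqno_passed() relies on: the unsigned difference is reinterpreted as signed, so ordering stays correct when the u32 counter wraps. This is a userspace stand-in, not driver code.

#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "has seq1 reached seq2?" in the style of i915_seqno_passed() */
static inline int seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
        printf("%d\n", seqno_passed(5, 3));           /* 1: 5 is at or past 3 */
        printf("%d\n", seqno_passed(3, 5));           /* 0: 3 has not reached 5 */
        printf("%d\n", seqno_passed(2, 0xfffffffeu)); /* 1: still correct across wraparound */
        return 0;
}
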
 
-/**
- * i915_gem_active_retire - waits until the request is retired
- * @active - the active request on which to wait
- *
- * i915_gem_active_retire() waits until the request is completed,
- * and then ensures that at least the retirement handler for this
- * @active tracker is called before returning. If the @active
- * tracker is idle, the function returns immediately.
- */
-static inline int __must_check
-i915_gem_active_retire(struct i915_gem_active *active,
-                      struct mutex *mutex)
+static inline void i915_request_mark_complete(struct i915_request *rq)
 {
-       struct i915_request *request;
-       long ret;
-
-       request = i915_gem_active_raw(active, mutex);
-       if (!request)
-               return 0;
-
-       ret = i915_request_wait(request,
-                               I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
-                               MAX_SCHEDULE_TIMEOUT);
-       if (ret < 0)
-               return ret;
-
-       list_del_init(&active->link);
-       RCU_INIT_POINTER(active->request, NULL);
-
-       active->retire(active, request);
-
-       return 0;
+       rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */
 }
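
To see why repointing rq->hwsp_seqno at the request's own fence seqno marks it complete, here is a rough self-contained model (hypothetical names, not the driver's structures): once the status pointer aliases the awaited value, the completion test above always passes.

#include <stdint.h>
#include <stdio.h>

struct req {
        uint32_t seqno;        /* value this request waits to be reached */
        const uint32_t *hwsp;  /* normally points into a shared status page */
};

/* Decouple from the shared page: *hwsp now always equals seqno */
static void mark_complete(struct req *rq)
{
        rq->hwsp = &rq->seqno;
}

static int completed(const struct req *rq)
{
        return (int32_t)(*rq->hwsp - rq->seqno) >= 0;
}

int main(void)
{
        uint32_t status_page = 0; /* hardware has not advanced */
        struct req rq = { .seqno = 10, .hwsp = &status_page };

        printf("completed? %d\n", completed(&rq)); /* 0 */
        mark_complete(&rq);
        printf("completed? %d\n", completed(&rq)); /* 1 */
        return 0;
}
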
 
-#define for_each_active(mask, idx) \
-       for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))
+void i915_retire_requests(struct drm_i915_private *i915);
 
 #endif /* I915_REQUEST_H */
diff --git a/drivers/gpu/drm/i915/i915_reset.c b/drivers/gpu/drm/i915/i915_reset.c
new file mode 100644 (file)
index 0000000..0e0ddf2
--- /dev/null
@@ -0,0 +1,1349 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2008-2018 Intel Corporation
+ */
+
+#include <linux/sched/mm.h>
+#include <linux/stop_machine.h>
+
+#include "i915_drv.h"
+#include "i915_gpu_error.h"
+#include "i915_reset.h"
+
+#include "intel_guc.h"
+
+#define RESET_MAX_RETRIES 3
+
+/* XXX How to handle concurrent GGTT updates using tiling registers? */
+#define RESET_UNDER_STOP_MACHINE 0
+
+static void engine_skip_context(struct i915_request *rq)
+{
+       struct intel_engine_cs *engine = rq->engine;
+       struct i915_gem_context *hung_ctx = rq->gem_context;
+       struct i915_timeline *timeline = rq->timeline;
+
+       lockdep_assert_held(&engine->timeline.lock);
+       GEM_BUG_ON(timeline == &engine->timeline);
+
+       spin_lock(&timeline->lock);
+
+       if (i915_request_is_active(rq)) {
+               list_for_each_entry_continue(rq,
+                                            &engine->timeline.requests, link)
+                       if (rq->gem_context == hung_ctx)
+                               i915_request_skip(rq, -EIO);
+       }
+
+       list_for_each_entry(rq, &timeline->requests, link)
+               i915_request_skip(rq, -EIO);
+
+       spin_unlock(&timeline->lock);
+}
+
+static void client_mark_guilty(struct drm_i915_file_private *file_priv,
+                              const struct i915_gem_context *ctx)
+{
+       unsigned int score;
+       unsigned long prev_hang;
+
+       if (i915_gem_context_is_banned(ctx))
+               score = I915_CLIENT_SCORE_CONTEXT_BAN;
+       else
+               score = 0;
+
+       prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
+       if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
+               score += I915_CLIENT_SCORE_HANG_FAST;
+
+       if (score) {
+               atomic_add(score, &file_priv->ban_score);
+
+               DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
+                                ctx->name, score,
+                                atomic_read(&file_priv->ban_score));
+       }
+}
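
The xchg()/time_before() pair above answers "was the previous hang recent?" without a lock. A rough userspace sketch of the same idea, with plain integers standing in for jiffies and an illustrative window:

#include <stdatomic.h>
#include <stdio.h>

#define FAST_HANG_WINDOW 120 /* illustrative, not the driver's constant */

static _Atomic unsigned long hang_timestamp;

/* Swap in the new timestamp and compare against the one it replaced */
static int hang_is_fast(unsigned long now)
{
        unsigned long prev = atomic_exchange(&hang_timestamp, now);

        return now < prev + FAST_HANG_WINDOW;
}

int main(void)
{
        printf("%d\n", hang_is_fast(1000)); /* 0: first hang ever */
        printf("%d\n", hang_is_fast(1050)); /* 1: 50 ticks after the last one */
        printf("%d\n", hang_is_fast(5000)); /* 0: long since the last one */
        return 0;
}
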
+
+static bool context_mark_guilty(struct i915_gem_context *ctx)
+{
+       unsigned int score;
+       bool banned, bannable;
+
+       atomic_inc(&ctx->guilty_count);
+
+       bannable = i915_gem_context_is_bannable(ctx);
+       score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
+       banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
+
+       /* Cool contexts don't accumulate client ban score */
+       if (!bannable)
+               return false;
+
+       if (banned) {
+               DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, banned\n",
+                                ctx->name, atomic_read(&ctx->guilty_count),
+                                score);
+               i915_gem_context_set_banned(ctx);
+       }
+
+       if (!IS_ERR_OR_NULL(ctx->file_priv))
+               client_mark_guilty(ctx->file_priv, ctx);
+
+       return banned;
+}
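
The banning decision is a simple threshold on an atomically accumulated score. A minimal sketch, with hypothetical values in place of CONTEXT_SCORE_GUILTY and CONTEXT_SCORE_BAN_THRESHOLD:

#include <stdatomic.h>
#include <stdio.h>

#define SCORE_GUILTY  10 /* hypothetical */
#define BAN_THRESHOLD 40 /* hypothetical */

static _Atomic unsigned int ban_score;

/* atomic_add_return() equivalent: add, then look at the new total */
static int mark_guilty(void)
{
        unsigned int score = atomic_fetch_add(&ban_score, SCORE_GUILTY) + SCORE_GUILTY;

        return score >= BAN_THRESHOLD;
}

int main(void)
{
        for (int hang = 1; hang <= 5; hang++)
                printf("hang %d: banned=%d\n", hang, mark_guilty()); /* banned from hang 4 on */
        return 0;
}
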
+
+static void context_mark_innocent(struct i915_gem_context *ctx)
+{
+       atomic_inc(&ctx->active_count);
+}
+
+void i915_reset_request(struct i915_request *rq, bool guilty)
+{
+       lockdep_assert_held(&rq->engine->timeline.lock);
+       GEM_BUG_ON(i915_request_completed(rq));
+
+       if (guilty) {
+               i915_request_skip(rq, -EIO);
+               if (context_mark_guilty(rq->gem_context))
+                       engine_skip_context(rq);
+       } else {
+               dma_fence_set_error(&rq->fence, -EAGAIN);
+               context_mark_innocent(rq->gem_context);
+       }
+}
+
+static void gen3_stop_engine(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+       const u32 base = engine->mmio_base;
+
+       if (intel_engine_stop_cs(engine))
+               DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n", engine->name);
+
+       I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base)));
+       POSTING_READ_FW(RING_HEAD(base)); /* paranoia */
+
+       I915_WRITE_FW(RING_HEAD(base), 0);
+       I915_WRITE_FW(RING_TAIL(base), 0);
+       POSTING_READ_FW(RING_TAIL(base));
+
+       /* The ring must be empty before it is disabled */
+       I915_WRITE_FW(RING_CTL(base), 0);
+
+       /* Check acts as a post */
+       if (I915_READ_FW(RING_HEAD(base)) != 0)
+               DRM_DEBUG_DRIVER("%s: ring head not parked\n",
+                                engine->name);
+}
+
+static void i915_stop_engines(struct drm_i915_private *i915,
+                             unsigned int engine_mask)
+{
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+
+       if (INTEL_GEN(i915) < 3)
+               return;
+
+       for_each_engine_masked(engine, i915, engine_mask, id)
+               gen3_stop_engine(engine);
+}
+
+static bool i915_in_reset(struct pci_dev *pdev)
+{
+       u8 gdrst;
+
+       pci_read_config_byte(pdev, I915_GDRST, &gdrst);
+       return gdrst & GRDOM_RESET_STATUS;
+}
+
+static int i915_do_reset(struct drm_i915_private *i915,
+                        unsigned int engine_mask,
+                        unsigned int retry)
+{
+       struct pci_dev *pdev = i915->drm.pdev;
+       int err;
+
+       /* Assert reset for at least 20 usec, and wait for acknowledgement. */
+       pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
+       udelay(50);
+       err = wait_for_atomic(i915_in_reset(pdev), 50);
+
+       /* Clear the reset request. */
+       pci_write_config_byte(pdev, I915_GDRST, 0);
+       udelay(50);
+       if (!err)
+               err = wait_for_atomic(!i915_in_reset(pdev), 50);
+
+       return err;
+}
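
The wait_for_atomic() calls above follow the poll-until-acknowledged pattern: spin on a condition with a deadline and report a timeout on expiry. A userspace approximation (the kernel helper busy-waits in atomic context; the 50 above is milliseconds):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define ETIMEDOUT 110

/* Poll cond() until it holds or timeout_ms elapses */
static int wait_for(bool (*cond)(void), long timeout_ms)
{
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
                if (cond())
                        return 0;
                clock_gettime(CLOCK_MONOTONIC, &now);
                if ((now.tv_sec - start.tv_sec) * 1000 +
                    (now.tv_nsec - start.tv_nsec) / 1000000 >= timeout_ms)
                        return -ETIMEDOUT;
        }
}

static bool never_ready(void) { return false; }

int main(void)
{
        printf("%d\n", wait_for(never_ready, 10)); /* -110 after ~10ms */
        return 0;
}
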
+
+static bool g4x_reset_complete(struct pci_dev *pdev)
+{
+       u8 gdrst;
+
+       pci_read_config_byte(pdev, I915_GDRST, &gdrst);
+       return (gdrst & GRDOM_RESET_ENABLE) == 0;
+}
+
+static int g33_do_reset(struct drm_i915_private *i915,
+                       unsigned int engine_mask,
+                       unsigned int retry)
+{
+       struct pci_dev *pdev = i915->drm.pdev;
+
+       pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
+       return wait_for_atomic(g4x_reset_complete(pdev), 50);
+}
+
+static int g4x_do_reset(struct drm_i915_private *dev_priv,
+                       unsigned int engine_mask,
+                       unsigned int retry)
+{
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+       int ret;
+
+       /* WaVcpClkGateDisableForMediaReset:ctg,elk */
+       I915_WRITE_FW(VDECCLK_GATE_D,
+                     I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
+       POSTING_READ_FW(VDECCLK_GATE_D);
+
+       pci_write_config_byte(pdev, I915_GDRST,
+                             GRDOM_MEDIA | GRDOM_RESET_ENABLE);
+       ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
+       if (ret) {
+               DRM_DEBUG_DRIVER("Wait for media reset failed\n");
+               goto out;
+       }
+
+       pci_write_config_byte(pdev, I915_GDRST,
+                             GRDOM_RENDER | GRDOM_RESET_ENABLE);
+       ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
+       if (ret) {
+               DRM_DEBUG_DRIVER("Wait for render reset failed\n");
+               goto out;
+       }
+
+out:
+       pci_write_config_byte(pdev, I915_GDRST, 0);
+
+       I915_WRITE_FW(VDECCLK_GATE_D,
+                     I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
+       POSTING_READ_FW(VDECCLK_GATE_D);
+
+       return ret;
+}
+
+static int ironlake_do_reset(struct drm_i915_private *dev_priv,
+                            unsigned int engine_mask,
+                            unsigned int retry)
+{
+       int ret;
+
+       I915_WRITE_FW(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
+       ret = __intel_wait_for_register_fw(dev_priv, ILK_GDSR,
+                                          ILK_GRDOM_RESET_ENABLE, 0,
+                                          5000, 0,
+                                          NULL);
+       if (ret) {
+               DRM_DEBUG_DRIVER("Wait for render reset failed\n");
+               goto out;
+       }
+
+       I915_WRITE_FW(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
+       ret = __intel_wait_for_register_fw(dev_priv, ILK_GDSR,
+                                          ILK_GRDOM_RESET_ENABLE, 0,
+                                          5000, 0,
+                                          NULL);
+       if (ret) {
+               DRM_DEBUG_DRIVER("Wait for media reset failed\n");
+               goto out;
+       }
+
+out:
+       I915_WRITE_FW(ILK_GDSR, 0);
+       POSTING_READ_FW(ILK_GDSR);
+       return ret;
+}
+
+/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
+static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
+                               u32 hw_domain_mask)
+{
+       int err;
+
+       /*
+        * GEN6_GDRST is not in the gt power well, so there is no need
+        * to check for fifo space for the write or to forcewake the
+        * chip for the read.
+        */
+       I915_WRITE_FW(GEN6_GDRST, hw_domain_mask);
+
+       /* Wait for the device to ack the reset requests */
+       err = __intel_wait_for_register_fw(dev_priv,
+                                          GEN6_GDRST, hw_domain_mask, 0,
+                                          500, 0,
+                                          NULL);
+       if (err)
+               DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
+                                hw_domain_mask);
+
+       return err;
+}
+
+static int gen6_reset_engines(struct drm_i915_private *i915,
+                             unsigned int engine_mask,
+                             unsigned int retry)
+{
+       struct intel_engine_cs *engine;
+       const u32 hw_engine_mask[I915_NUM_ENGINES] = {
+               [RCS] = GEN6_GRDOM_RENDER,
+               [BCS] = GEN6_GRDOM_BLT,
+               [VCS] = GEN6_GRDOM_MEDIA,
+               [VCS2] = GEN8_GRDOM_MEDIA2,
+               [VECS] = GEN6_GRDOM_VECS,
+       };
+       u32 hw_mask;
+
+       if (engine_mask == ALL_ENGINES) {
+               hw_mask = GEN6_GRDOM_FULL;
+       } else {
+               unsigned int tmp;
+
+               hw_mask = 0;
+               for_each_engine_masked(engine, i915, engine_mask, tmp)
+                       hw_mask |= hw_engine_mask[engine->id];
+       }
+
+       return gen6_hw_domain_reset(i915, hw_mask);
+}
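
The translation from the driver's engine mask to the hardware's reset-domain mask is a table walk over the set bits. A compact sketch with illustrative engine ids and domain bits:

#include <stdio.h>

enum { RCS, BCS, VCS, NUM_ENGINES }; /* illustrative subset */

static const unsigned int hw_domain[NUM_ENGINES] = {
        [RCS] = 1u << 0,
        [BCS] = 1u << 1,
        [VCS] = 1u << 2,
};

/* OR together the hardware domain bit of every engine in the mask */
static unsigned int to_hw_mask(unsigned int engine_mask)
{
        unsigned int hw_mask = 0;

        for (int id = 0; id < NUM_ENGINES; id++)
                if (engine_mask & (1u << id))
                        hw_mask |= hw_domain[id];
        return hw_mask;
}

int main(void)
{
        printf("0x%x\n", to_hw_mask((1u << RCS) | (1u << VCS))); /* 0x5 */
        return 0;
}
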
+
+static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv,
+                         struct intel_engine_cs *engine)
+{
+       u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
+       i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
+       u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
+       i915_reg_t sfc_usage;
+       u32 sfc_usage_bit;
+       u32 sfc_reset_bit;
+
+       switch (engine->class) {
+       case VIDEO_DECODE_CLASS:
+               if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
+                       return 0;
+
+               sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
+               sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
+
+               sfc_forced_lock_ack = GEN11_VCS_SFC_LOCK_STATUS(engine);
+               sfc_forced_lock_ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT;
+
+               sfc_usage = GEN11_VCS_SFC_LOCK_STATUS(engine);
+               sfc_usage_bit = GEN11_VCS_SFC_USAGE_BIT;
+               sfc_reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
+               break;
+
+       case VIDEO_ENHANCEMENT_CLASS:
+               sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
+               sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
+
+               sfc_forced_lock_ack = GEN11_VECS_SFC_LOCK_ACK(engine);
+               sfc_forced_lock_ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT;
+
+               sfc_usage = GEN11_VECS_SFC_USAGE(engine);
+               sfc_usage_bit = GEN11_VECS_SFC_USAGE_BIT;
+               sfc_reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
+               break;
+
+       default:
+               return 0;
+       }
+
+       /*
+        * Tell the engine that a software reset is going to happen. The engine
+        * will then try to force lock the SFC (if currently locked, it will
+        * remain so until we tell the engine it is safe to unlock; if currently
+        * unlocked, it will ignore this and all new lock requests). If SFC
+        * ends up being locked to the engine we want to reset, we have to reset
+        * it as well (we will unlock it once the reset sequence is completed).
+        */
+       I915_WRITE_FW(sfc_forced_lock,
+                     I915_READ_FW(sfc_forced_lock) | sfc_forced_lock_bit);
+
+       if (__intel_wait_for_register_fw(dev_priv,
+                                        sfc_forced_lock_ack,
+                                        sfc_forced_lock_ack_bit,
+                                        sfc_forced_lock_ack_bit,
+                                        1000, 0, NULL)) {
+               DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
+               return 0;
+       }
+
+       if (I915_READ_FW(sfc_usage) & sfc_usage_bit)
+               return sfc_reset_bit;
+
+       return 0;
+}
+
+static void gen11_unlock_sfc(struct drm_i915_private *dev_priv,
+                            struct intel_engine_cs *engine)
+{
+       u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
+       i915_reg_t sfc_forced_lock;
+       u32 sfc_forced_lock_bit;
+
+       switch (engine->class) {
+       case VIDEO_DECODE_CLASS:
+               if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
+                       return;
+
+               sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
+               sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
+               break;
+
+       case VIDEO_ENHANCEMENT_CLASS:
+               sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
+               sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
+               break;
+
+       default:
+               return;
+       }
+
+       I915_WRITE_FW(sfc_forced_lock,
+                     I915_READ_FW(sfc_forced_lock) & ~sfc_forced_lock_bit);
+}
+
+static int gen11_reset_engines(struct drm_i915_private *i915,
+                              unsigned int engine_mask,
+                              unsigned int retry)
+{
+       const u32 hw_engine_mask[I915_NUM_ENGINES] = {
+               [RCS] = GEN11_GRDOM_RENDER,
+               [BCS] = GEN11_GRDOM_BLT,
+               [VCS] = GEN11_GRDOM_MEDIA,
+               [VCS2] = GEN11_GRDOM_MEDIA2,
+               [VCS3] = GEN11_GRDOM_MEDIA3,
+               [VCS4] = GEN11_GRDOM_MEDIA4,
+               [VECS] = GEN11_GRDOM_VECS,
+               [VECS2] = GEN11_GRDOM_VECS2,
+       };
+       struct intel_engine_cs *engine;
+       unsigned int tmp;
+       u32 hw_mask;
+       int ret;
+
+       BUILD_BUG_ON(VECS2 + 1 != I915_NUM_ENGINES);
+
+       if (engine_mask == ALL_ENGINES) {
+               hw_mask = GEN11_GRDOM_FULL;
+       } else {
+               hw_mask = 0;
+               for_each_engine_masked(engine, i915, engine_mask, tmp) {
+                       hw_mask |= hw_engine_mask[engine->id];
+                       hw_mask |= gen11_lock_sfc(i915, engine);
+               }
+       }
+
+       ret = gen6_hw_domain_reset(i915, hw_mask);
+
+       if (engine_mask != ALL_ENGINES)
+               for_each_engine_masked(engine, i915, engine_mask, tmp)
+                       gen11_unlock_sfc(i915, engine);
+
+       return ret;
+}
+
+static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+       int ret;
+
+       I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
+                     _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
+
+       ret = __intel_wait_for_register_fw(dev_priv,
+                                          RING_RESET_CTL(engine->mmio_base),
+                                          RESET_CTL_READY_TO_RESET,
+                                          RESET_CTL_READY_TO_RESET,
+                                          700, 0,
+                                          NULL);
+       if (ret)
+               DRM_ERROR("%s: reset request timeout\n", engine->name);
+
+       return ret;
+}
+
+static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+
+       I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
+                     _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
+}
+
+static int gen8_reset_engines(struct drm_i915_private *i915,
+                             unsigned int engine_mask,
+                             unsigned int retry)
+{
+       struct intel_engine_cs *engine;
+       const bool reset_non_ready = retry >= 1;
+       unsigned int tmp;
+       int ret;
+
+       for_each_engine_masked(engine, i915, engine_mask, tmp) {
+               ret = gen8_engine_reset_prepare(engine);
+               if (ret && !reset_non_ready)
+                       goto skip_reset;
+
+               /*
+                * If this is not the first failed attempt to prepare,
+                * we decide to proceed anyway.
+                *
+                * By doing so we risk context corruption and, on
+                * some gens (kbl), a possible system hang if the reset
+                * happens during active bb execution.
+                *
+                * We would rather take context corruption than a
+                * failed reset with a wedged driver/gpu. The active
+                * bb execution case should be covered by the
+                * i915_stop_engines() call we make before the reset.
+                */
+       }
+
+       if (INTEL_GEN(i915) >= 11)
+               ret = gen11_reset_engines(i915, engine_mask, retry);
+       else
+               ret = gen6_reset_engines(i915, engine_mask, retry);
+
+skip_reset:
+       for_each_engine_masked(engine, i915, engine_mask, tmp)
+               gen8_engine_reset_cancel(engine);
+
+       return ret;
+}
+
+typedef int (*reset_func)(struct drm_i915_private *,
+                         unsigned int engine_mask,
+                         unsigned int retry);
+
+static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
+{
+       if (!i915_modparams.reset)
+               return NULL;
+
+       if (INTEL_GEN(i915) >= 8)
+               return gen8_reset_engines;
+       else if (INTEL_GEN(i915) >= 6)
+               return gen6_reset_engines;
+       else if (INTEL_GEN(i915) >= 5)
+               return ironlake_do_reset;
+       else if (IS_G4X(i915))
+               return g4x_do_reset;
+       else if (IS_G33(i915) || IS_PINEVIEW(i915))
+               return g33_do_reset;
+       else if (INTEL_GEN(i915) >= 3)
+               return i915_do_reset;
+       else
+               return NULL;
+}
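
The per-platform selection is a capability-ordered function-pointer dispatch that returns NULL when no reset backend applies, which the caller turns into -ENODEV. A sketch with made-up generation numbers and backends:

#include <stdio.h>

typedef int (*reset_func)(unsigned int engine_mask);

static int newer_reset(unsigned int mask) { printf("newer: 0x%x\n", mask); return 0; }
static int older_reset(unsigned int mask) { printf("older: 0x%x\n", mask); return 0; }

/* Most capable backend first; NULL means "no reset support" */
static reset_func get_reset(int gen)
{
        if (gen >= 8)
                return newer_reset;
        if (gen >= 6)
                return older_reset;
        return NULL;
}

int main(void)
{
        reset_func reset = get_reset(9);

        return reset ? reset(~0u) : -1 /* would be -ENODEV in the driver */;
}
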
+
+int intel_gpu_reset(struct drm_i915_private *i915, unsigned int engine_mask)
+{
+       const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
+       reset_func reset;
+       int ret = -ETIMEDOUT;
+       int retry;
+
+       reset = intel_get_gpu_reset(i915);
+       if (!reset)
+               return -ENODEV;
+
+       /*
+        * If the power well sleeps during the reset, the reset
+        * request may be dropped and never complete (causing -EIO).
+        */
+       intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+       for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
+               /*
+                * We stop the engines, otherwise we might get a failed
+                * reset and a dead gpu (on elk). Even a gpu as modern as
+                * kbl can suffer a system hang if a batchbuffer is
+                * progressing when the reset is issued, regardless of the
+                * READY_TO_RESET ack. Thus we assume it is best to stop
+                * the engines on all gens where we have a gpu reset.
+                *
+                * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
+                *
+                * WaMediaResetMainRingCleanup:ctg,elk (presumably)
+                *
+                * FIXME: Wa for more modern gens needs to be validated
+                */
+               i915_stop_engines(i915, engine_mask);
+
+               GEM_TRACE("engine_mask=%x\n", engine_mask);
+               preempt_disable();
+               ret = reset(i915, engine_mask, retry);
+               preempt_enable();
+       }
+       intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+
+       return ret;
+}
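
The retry policy above is narrow on purpose: only -ETIMEDOUT is retried, and only up to RESET_MAX_RETRIES attempts for a full reset. In outline, with a toy backend that succeeds on its third attempt:

#include <stdio.h>

#define ETIMEDOUT   110
#define MAX_RETRIES 3

static int try_reset(int attempt)
{
        return attempt < 2 ? -ETIMEDOUT : 0; /* toy backend: third time lucky */
}

int main(void)
{
        int ret = -ETIMEDOUT;

        /* Retry only while the failure is a timeout */
        for (int retry = 0; ret == -ETIMEDOUT && retry < MAX_RETRIES; retry++)
                ret = try_reset(retry);
        printf("ret=%d\n", ret); /* 0 */
        return 0;
}
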
+
+bool intel_has_gpu_reset(struct drm_i915_private *i915)
+{
+       if (USES_GUC(i915))
+               return false;
+
+       return intel_get_gpu_reset(i915);
+}
+
+bool intel_has_reset_engine(struct drm_i915_private *i915)
+{
+       return INTEL_INFO(i915)->has_reset_engine && i915_modparams.reset >= 2;
+}
+
+int intel_reset_guc(struct drm_i915_private *i915)
+{
+       u32 guc_domain =
+               INTEL_GEN(i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
+       int ret;
+
+       GEM_BUG_ON(!HAS_GUC(i915));
+
+       intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+       ret = gen6_hw_domain_reset(i915, guc_domain);
+       intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+
+       return ret;
+}
+
+/*
+ * Ensure the irq handler finishes, and does not run again.
+ * Also return the active request so that we only search for it once.
+ */
+static void reset_prepare_engine(struct intel_engine_cs *engine)
+{
+       /*
+        * During the reset sequence, we must prevent the engine from
+        * entering RC6. As the context state is undefined until we restart
+        * the engine, if it does enter RC6 during the reset, the state
+        * written to the powercontext is undefined and so we may lose
+        * GPU state upon resume, i.e. fail to restart after a reset.
+        */
+       intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
+       engine->reset.prepare(engine);
+}
+
+static void reset_prepare(struct drm_i915_private *i915)
+{
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+
+       for_each_engine(engine, i915, id)
+               reset_prepare_engine(engine);
+
+       intel_uc_sanitize(i915);
+}
+
+static int gt_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
+{
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+       int err;
+
+       /*
+        * Everything depends on having the GTT running, so we need to start
+        * there.
+        */
+       err = i915_ggtt_enable_hw(i915);
+       if (err)
+               return err;
+
+       for_each_engine(engine, i915, id)
+               intel_engine_reset(engine, stalled_mask & ENGINE_MASK(id));
+
+       i915_gem_restore_fences(i915);
+
+       return err;
+}
+
+static void reset_finish_engine(struct intel_engine_cs *engine)
+{
+       engine->reset.finish(engine);
+       intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
+}
+
+struct i915_gpu_restart {
+       struct work_struct work;
+       struct drm_i915_private *i915;
+};
+
+static void restart_work(struct work_struct *work)
+{
+       struct i915_gpu_restart *arg = container_of(work, typeof(*arg), work);
+       struct drm_i915_private *i915 = arg->i915;
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+       intel_wakeref_t wakeref;
+
+       wakeref = intel_runtime_pm_get(i915);
+       mutex_lock(&i915->drm.struct_mutex);
+       WRITE_ONCE(i915->gpu_error.restart, NULL);
+
+       for_each_engine(engine, i915, id) {
+               struct i915_request *rq;
+
+               /*
+                * Ostensibly, we always want a context loaded for powersaving,
+                * so if the engine is idle after the reset, send a request
+                * to load our scratch kernel_context.
+                */
+               if (!intel_engine_is_idle(engine))
+                       continue;
+
+               rq = i915_request_alloc(engine, i915->kernel_context);
+               if (!IS_ERR(rq))
+                       i915_request_add(rq);
+       }
+
+       mutex_unlock(&i915->drm.struct_mutex);
+       intel_runtime_pm_put(i915, wakeref);
+
+       kfree(arg);
+}
+
+static void reset_finish(struct drm_i915_private *i915)
+{
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+
+       for_each_engine(engine, i915, id)
+               reset_finish_engine(engine);
+}
+
+static void reset_restart(struct drm_i915_private *i915)
+{
+       struct i915_gpu_restart *arg;
+
+       /*
+        * Following the reset, ensure that we always reload a context,
+        * both for powersaving and to correct engine->last_retired_context.
+        * Since
+        * this requires us to submit a request, queue a worker to do that
+        * task for us to evade any locking here.
+        */
+       if (READ_ONCE(i915->gpu_error.restart))
+               return;
+
+       arg = kmalloc(sizeof(*arg), GFP_KERNEL);
+       if (arg) {
+               arg->i915 = i915;
+               INIT_WORK(&arg->work, restart_work);
+
+               WRITE_ONCE(i915->gpu_error.restart, arg);
+               queue_work(i915->wq, &arg->work);
+       }
+}
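
Submitting a request from here would require locks the caller may already hold, so the task is bounced to a worker that owns its argument block and frees it when done. A pthread stand-in for the workqueue pattern (hypothetical names):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct restart_arg {
        int device_id;
};

/* The worker runs outside the caller's locks and owns/frees its argument */
static void *restart_work(void *data)
{
        struct restart_arg *arg = data;

        printf("restarting device %d\n", arg->device_id);
        free(arg);
        return NULL;
}

int main(void)
{
        struct restart_arg *arg = malloc(sizeof(*arg));
        pthread_t worker;

        if (!arg)
                return 1;
        arg->device_id = 0;
        pthread_create(&worker, NULL, restart_work, arg);
        pthread_join(worker, NULL);
        return 0;
}
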
+
+static void nop_submit_request(struct i915_request *request)
+{
+       struct intel_engine_cs *engine = request->engine;
+       unsigned long flags;
+
+       GEM_TRACE("%s fence %llx:%lld -> -EIO\n",
+                 engine->name, request->fence.context, request->fence.seqno);
+       dma_fence_set_error(&request->fence, -EIO);
+
+       spin_lock_irqsave(&engine->timeline.lock, flags);
+       __i915_request_submit(request);
+       i915_request_mark_complete(request);
+       intel_engine_write_global_seqno(engine, request->global_seqno);
+       spin_unlock_irqrestore(&engine->timeline.lock, flags);
+
+       intel_engine_queue_breadcrumbs(engine);
+}
+
+void i915_gem_set_wedged(struct drm_i915_private *i915)
+{
+       struct i915_gpu_error *error = &i915->gpu_error;
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+
+       mutex_lock(&error->wedge_mutex);
+       if (test_bit(I915_WEDGED, &error->flags)) {
+               mutex_unlock(&error->wedge_mutex);
+               return;
+       }
+
+       if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(i915)) {
+               struct drm_printer p = drm_debug_printer(__func__);
+
+               for_each_engine(engine, i915, id)
+                       intel_engine_dump(engine, &p, "%s\n", engine->name);
+       }
+
+       GEM_TRACE("start\n");
+
+       /*
+        * First, stop submission to hw, but do not yet complete requests by
+        * rolling the global seqno forward (since this would complete requests
+        * for which we haven't set the fence error to EIO yet).
+        */
+       for_each_engine(engine, i915, id)
+               reset_prepare_engine(engine);
+
+       /* Even if the GPU reset fails, it should still stop the engines */
+       if (INTEL_GEN(i915) >= 5)
+               intel_gpu_reset(i915, ALL_ENGINES);
+
+       for_each_engine(engine, i915, id) {
+               engine->submit_request = nop_submit_request;
+               engine->schedule = NULL;
+       }
+       i915->caps.scheduler = 0;
+
+       /*
+        * Make sure no request can slip through without getting completed by
+        * either this call here to intel_engine_write_global_seqno, or the one
+        * in nop_submit_request.
+        */
+       synchronize_rcu();
+
+       /* Mark all executing requests as skipped */
+       for_each_engine(engine, i915, id)
+               engine->cancel_requests(engine);
+
+       for_each_engine(engine, i915, id) {
+               reset_finish_engine(engine);
+               intel_engine_signal_breadcrumbs(engine);
+       }
+
+       smp_mb__before_atomic();
+       set_bit(I915_WEDGED, &error->flags);
+
+       GEM_TRACE("end\n");
+       mutex_unlock(&error->wedge_mutex);
+
+       wake_up_all(&error->reset_queue);
+}
+
+bool i915_gem_unset_wedged(struct drm_i915_private *i915)
+{
+       struct i915_gpu_error *error = &i915->gpu_error;
+       struct i915_timeline *tl;
+       bool ret = false;
+
+       if (!test_bit(I915_WEDGED, &error->flags))
+               return true;
+
+       if (!i915->gt.scratch) /* Never fully initialised, recovery impossible */
+               return false;
+
+       mutex_lock(&error->wedge_mutex);
+
+       GEM_TRACE("start\n");
+
+       /*
+        * Before unwedging, make sure that all pending operations
+        * are flushed and errored out - we may have requests waiting upon
+        * third party fences. We marked all inflight requests as EIO, and
+        * every execbuf since then has returned EIO; for consistency we
+        * want all the currently pending requests to also be marked as
+        * EIO, which
+        * is done inside our nop_submit_request - and so we must wait.
+        *
+        * No more can be submitted until we reset the wedged bit.
+        */
+       mutex_lock(&i915->gt.timelines.mutex);
+       list_for_each_entry(tl, &i915->gt.timelines.active_list, link) {
+               struct i915_request *rq;
+               long timeout;
+
+               rq = i915_active_request_get_unlocked(&tl->last_request);
+               if (!rq)
+                       continue;
+
+               /*
+                * We can't use our normal waiter as we want to
+                * avoid recursively trying to handle the current
+                * reset. The basic dma_fence_default_wait() installs
+                * a callback for dma_fence_signal(), which is
+                * triggered by our nop handler (indirectly: the
+                * callback enables the signaler thread, which is
+                * woken by nop_submit_request() advancing the seqno;
+                * when the seqno passes the fence, the signaler
+                * then signals the fence, waking us up).
+                */
+               timeout = dma_fence_default_wait(&rq->fence, true,
+                                                MAX_SCHEDULE_TIMEOUT);
+               i915_request_put(rq);
+               if (timeout < 0) {
+                       mutex_unlock(&i915->gt.timelines.mutex);
+                       goto unlock;
+               }
+       }
+       mutex_unlock(&i915->gt.timelines.mutex);
+
+       intel_engines_sanitize(i915, false);
+
+       /*
+        * Undo nop_submit_request. We prevent all new i915 requests from
+        * being queued (by disallowing execbuf whilst wedged), so, having
+        * waited for all active requests above, we know the system is idle
+        * and do not have to worry about a thread being inside
+        * engine->submit_request() as we swap over. So unlike installing
+        * the nop_submit_request on reset, we can do this from normal
+        * context and do not require stop_machine().
+        */
+       intel_engines_reset_default_submission(i915);
+
+       GEM_TRACE("end\n");
+
+       smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
+       clear_bit(I915_WEDGED, &i915->gpu_error.flags);
+       ret = true;
+unlock:
+       mutex_unlock(&i915->gpu_error.wedge_mutex);
+
+       return ret;
+}
+
+struct __i915_reset {
+       struct drm_i915_private *i915;
+       unsigned int stalled_mask;
+};
+
+static int __i915_reset__BKL(void *data)
+{
+       struct __i915_reset *arg = data;
+       int err;
+
+       err = intel_gpu_reset(arg->i915, ALL_ENGINES);
+       if (err)
+               return err;
+
+       return gt_reset(arg->i915, arg->stalled_mask);
+}
+
+#if RESET_UNDER_STOP_MACHINE
+/*
+ * XXX An alternative to using stop_machine would be to park only the
+ * processes that have a GGTT mmap. By remotely parking the threads (SIGSTOP)
+ * we should be able to prevent their memory accesses via the lost fence
+ * registers over the course of the reset without the potential for recursion
+ * on mutexes between the pagefault handler and the reset.
+ *
+ * See igt/gem_mmap_gtt/hang
+ */
+#define __do_reset(fn, arg) stop_machine(fn, arg, NULL)
+#else
+#define __do_reset(fn, arg) fn(arg)
+#endif
+
+static int do_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
+{
+       struct __i915_reset arg = { i915, stalled_mask };
+       int err, i;
+
+       err = __do_reset(__i915_reset__BKL, &arg);
+       for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
+               msleep(100);
+               err = __do_reset(__i915_reset__BKL, &arg);
+       }
+
+       return err;
+}
+
+/**
+ * i915_reset - reset chip after a hang
+ * @i915: #drm_i915_private to reset
+ * @stalled_mask: mask of the stalled engines with the guilty requests
+ * @reason: user error message for why we are resetting
+ *
+ * Reset the chip.  Useful if a hang is detected. Marks the device as wedged
+ * on failure.
+ *
+ * Caller must hold the struct_mutex.
+ *
+ * Procedure is fairly simple:
+ *   - reset the chip using the reset reg
+ *   - re-init context state
+ *   - re-init hardware status page
+ *   - re-init ring buffer
+ *   - re-init interrupt state
+ *   - re-init display
+ */
+void i915_reset(struct drm_i915_private *i915,
+               unsigned int stalled_mask,
+               const char *reason)
+{
+       struct i915_gpu_error *error = &i915->gpu_error;
+       int ret;
+
+       GEM_TRACE("flags=%lx\n", error->flags);
+
+       might_sleep();
+       assert_rpm_wakelock_held(i915);
+       GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
+
+       /* Clear any previous failed attempts at recovery. Time to try again. */
+       if (!i915_gem_unset_wedged(i915))
+               return;
+
+       if (reason)
+               dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
+       error->reset_count++;
+
+       reset_prepare(i915);
+
+       if (!intel_has_gpu_reset(i915)) {
+               if (i915_modparams.reset)
+                       dev_err(i915->drm.dev, "GPU reset not supported\n");
+               else
+                       DRM_DEBUG_DRIVER("GPU reset disabled\n");
+               goto error;
+       }
+
+       if (do_reset(i915, stalled_mask)) {
+               dev_err(i915->drm.dev, "Failed to reset chip\n");
+               goto taint;
+       }
+
+       intel_overlay_reset(i915);
+
+       /*
+        * Next we need to restore the context, but we don't make use
+        * of that yet either...
+        *
+        * Ring buffer needs to be re-initialized in the KMS case, or if X
+        * was running at the time of the reset (i.e. we weren't VT
+        * switched away).
+        */
+       ret = i915_gem_init_hw(i915);
+       if (ret) {
+               DRM_ERROR("Failed to initialise HW following reset (%d)\n",
+                         ret);
+               goto error;
+       }
+
+       i915_queue_hangcheck(i915);
+
+finish:
+       reset_finish(i915);
+       if (!i915_terminally_wedged(error))
+               reset_restart(i915);
+       return;
+
+taint:
+       /*
+        * History tells us that if we cannot reset the GPU now, we
+        * never will. This then impacts everything that is run
+        * subsequently. On failing the reset, we mark the driver
+        * as wedged, preventing further execution on the GPU.
+        * We also want to go one step further and add a taint to the
+        * kernel so that any subsequent faults can be traced back to
+        * this failure. This is important for CI, where if the
+        * GPU/driver fails we would like to reboot and restart testing
+        * rather than continue on into oblivion. For everyone else,
+        * the system should still plod along, but they have been warned!
+        */
+       add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
+error:
+       i915_gem_set_wedged(i915);
+       goto finish;
+}
+
+static inline int intel_gt_reset_engine(struct drm_i915_private *i915,
+                                       struct intel_engine_cs *engine)
+{
+       return intel_gpu_reset(i915, intel_engine_flag(engine));
+}
+
+/**
+ * i915_reset_engine - reset GPU engine to recover from a hang
+ * @engine: engine to reset
+ * @msg: reason for GPU reset; or NULL for no dev_notice()
+ *
+ * Reset a specific GPU engine. Useful if a hang is detected.
+ * Returns zero on successful reset or otherwise an error code.
+ *
+ * Procedure is:
+ *  - identify the request that caused the hang and drop it
+ *  - reset engine (which will force the engine to idle)
+ *  - re-init/configure engine
+ */
+int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
+{
+       struct i915_gpu_error *error = &engine->i915->gpu_error;
+       int ret;
+
+       GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
+       GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
+
+       reset_prepare_engine(engine);
+
+       if (msg)
+               dev_notice(engine->i915->drm.dev,
+                          "Resetting %s for %s\n", engine->name, msg);
+       error->reset_engine_count[engine->id]++;
+
+       if (!engine->i915->guc.execbuf_client)
+               ret = intel_gt_reset_engine(engine->i915, engine);
+       else
+               ret = intel_guc_reset_engine(&engine->i915->guc, engine);
+       if (ret) {
+               /* If we fail here, we expect to fall back to a global reset */
+               DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
+                                engine->i915->guc.execbuf_client ? "GuC " : "",
+                                engine->name, ret);
+               goto out;
+       }
+
+       /*
+        * The request that caused the hang is stuck on elsp; we know the
+        * active request and can drop it, adjusting the ring head to skip
+        * the offending request so that the remaining requests in the
+        * queue resume execution.
+        */
+       intel_engine_reset(engine, true);
+
+       /*
+        * The engine and its registers (and workarounds in case of render)
+        * have been reset to their default values. Follow the init_ring
+        * process to program RING_MODE, HWSP and re-enable submission.
+        */
+       ret = engine->init_hw(engine);
+       if (ret)
+               goto out;
+
+out:
+       intel_engine_cancel_stop_cs(engine);
+       reset_finish_engine(engine);
+       return ret;
+}
+
+static void i915_reset_device(struct drm_i915_private *i915,
+                             u32 engine_mask,
+                             const char *reason)
+{
+       struct i915_gpu_error *error = &i915->gpu_error;
+       struct kobject *kobj = &i915->drm.primary->kdev->kobj;
+       char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
+       char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
+       char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
+       struct i915_wedge_me w;
+
+       kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
+
+       DRM_DEBUG_DRIVER("resetting chip\n");
+       kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
+
+       /* Use a watchdog to ensure that our reset completes */
+       i915_wedge_on_timeout(&w, i915, 5 * HZ) {
+               intel_prepare_reset(i915);
+
+               i915_reset(i915, engine_mask, reason);
+
+               intel_finish_reset(i915);
+       }
+
+       if (!test_bit(I915_WEDGED, &error->flags))
+               kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
+}
+
+void i915_clear_error_registers(struct drm_i915_private *dev_priv)
+{
+       u32 eir;
+
+       if (!IS_GEN(dev_priv, 2))
+               I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
+
+       if (INTEL_GEN(dev_priv) < 4)
+               I915_WRITE(IPEIR, I915_READ(IPEIR));
+       else
+               I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));
+
+       I915_WRITE(EIR, I915_READ(EIR));
+       eir = I915_READ(EIR);
+       if (eir) {
+               /*
+                * Some errors might have become stuck,
+                * so mask them.
+                */
+               DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
+               I915_WRITE(EMR, I915_READ(EMR) | eir);
+               I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT);
+       }
+
+       if (INTEL_GEN(dev_priv) >= 8) {
+               I915_WRITE(GEN8_RING_FAULT_REG,
+                          I915_READ(GEN8_RING_FAULT_REG) & ~RING_FAULT_VALID);
+               POSTING_READ(GEN8_RING_FAULT_REG);
+       } else if (INTEL_GEN(dev_priv) >= 6) {
+               struct intel_engine_cs *engine;
+               enum intel_engine_id id;
+
+               for_each_engine(engine, dev_priv, id) {
+                       I915_WRITE(RING_FAULT_REG(engine),
+                                  I915_READ(RING_FAULT_REG(engine)) &
+                                  ~RING_FAULT_VALID);
+               }
+               POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
+       }
+}
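
Several of the registers cleared above are write-1-to-clear: reading the register and writing the value back clears exactly the bits that were latched. The idiom, modeled with a plain variable standing in for the register:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_eir = 0x5; /* two sticky error bits latched */

static uint32_t reg_read(void)      { return fake_eir; }
static void reg_write(uint32_t val) { fake_eir &= ~val; /* W1C semantics */ }

int main(void)
{
        reg_write(reg_read());            /* clear whatever was latched */
        printf("eir=0x%x\n", reg_read()); /* eir=0x0 */
        return 0;
}
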
+
+/**
+ * i915_handle_error - handle a gpu error
+ * @i915: i915 device private
+ * @engine_mask: mask representing engines that are hung
+ * @flags: control flags
+ * @fmt: Error message format string
+ *
+ * Do some basic checking of register state at error time and
+ * dump it to the syslog.  Also call i915_capture_error_state() to make
+ * sure we get a record and make it available in debugfs.  Fire a uevent
+ * so userspace knows something bad happened (should trigger collection
+ * of a ring dump etc.).
+ */
+void i915_handle_error(struct drm_i915_private *i915,
+                      u32 engine_mask,
+                      unsigned long flags,
+                      const char *fmt, ...)
+{
+       struct intel_engine_cs *engine;
+       intel_wakeref_t wakeref;
+       unsigned int tmp;
+       char error_msg[80];
+       char *msg = NULL;
+
+       if (fmt) {
+               va_list args;
+
+               va_start(args, fmt);
+               vscnprintf(error_msg, sizeof(error_msg), fmt, args);
+               va_end(args);
+
+               msg = error_msg;
+       }
+
+       /*
+        * In most cases it's guaranteed that we get here with an RPM
+        * reference held, for example because there is a pending GPU
+        * request that won't finish until the reset is done. This
+        * isn't the case at least when we get here by doing a
+        * simulated reset via debugfs, so get an RPM reference.
+        */
+       wakeref = intel_runtime_pm_get(i915);
+
+       engine_mask &= INTEL_INFO(i915)->ring_mask;
+
+       if (flags & I915_ERROR_CAPTURE) {
+               i915_capture_error_state(i915, engine_mask, msg);
+               i915_clear_error_registers(i915);
+       }
+
+       /*
+        * Try engine reset when available. We fall back to full reset if
+        * single reset fails.
+        */
+       if (intel_has_reset_engine(i915) &&
+           !i915_terminally_wedged(&i915->gpu_error)) {
+               for_each_engine_masked(engine, i915, engine_mask, tmp) {
+                       BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
+                       if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
+                                            &i915->gpu_error.flags))
+                               continue;
+
+                       if (i915_reset_engine(engine, msg) == 0)
+                               engine_mask &= ~intel_engine_flag(engine);
+
+                       clear_bit(I915_RESET_ENGINE + engine->id,
+                                 &i915->gpu_error.flags);
+                       wake_up_bit(&i915->gpu_error.flags,
+                                   I915_RESET_ENGINE + engine->id);
+               }
+       }
+
+       if (!engine_mask)
+               goto out;
+
+       /* Full reset needs the mutex, so stop any other user attempting one. */
+       if (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags)) {
+               wait_event(i915->gpu_error.reset_queue,
+                          !test_bit(I915_RESET_BACKOFF,
+                                    &i915->gpu_error.flags));
+               goto out;
+       }
+
+       /* Prevent any other reset-engine attempt. */
+       for_each_engine(engine, i915, tmp) {
+               while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
+                                       &i915->gpu_error.flags))
+                       wait_on_bit(&i915->gpu_error.flags,
+                                   I915_RESET_ENGINE + engine->id,
+                                   TASK_UNINTERRUPTIBLE);
+       }
+
+       i915_reset_device(i915, engine_mask, msg);
+
+       for_each_engine(engine, i915, tmp) {
+               clear_bit(I915_RESET_ENGINE + engine->id,
+                         &i915->gpu_error.flags);
+       }
+
+       clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+       wake_up_all(&i915->gpu_error.reset_queue);
+
+out:
+       intel_runtime_pm_put(i915, wakeref);
+}
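
The I915_RESET_ENGINE + id bits act as per-engine locks: an engine reset proceeds only if its bit is won with an atomic test-and-set, and the full-reset path first claims every bit. The core idiom, using C11 atomics in place of test_and_set_bit()/clear_bit():

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long reset_flags;

/* test_and_set_bit(): returns 1 only if we newly claimed the bit */
static int try_claim(int bit)
{
        unsigned long mask = 1ul << bit;

        return !(atomic_fetch_or(&reset_flags, mask) & mask);
}

/* clear_bit() */
static void release(int bit)
{
        atomic_fetch_and(&reset_flags, ~(1ul << bit));
}

int main(void)
{
        printf("claimed=%d\n", try_claim(0)); /* 1: we won the bit */
        printf("claimed=%d\n", try_claim(0)); /* 0: already held */
        release(0);
        printf("claimed=%d\n", try_claim(0)); /* 1: free again */
        return 0;
}
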
+
+bool i915_reset_flush(struct drm_i915_private *i915)
+{
+       int err;
+
+       cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
+
+       flush_workqueue(i915->wq);
+       GEM_BUG_ON(READ_ONCE(i915->gpu_error.restart));
+
+       mutex_lock(&i915->drm.struct_mutex);
+       err = i915_gem_wait_for_idle(i915,
+                                    I915_WAIT_LOCKED |
+                                    I915_WAIT_FOR_IDLE_BOOST,
+                                    MAX_SCHEDULE_TIMEOUT);
+       mutex_unlock(&i915->drm.struct_mutex);
+
+       return !err;
+}
+
+static void i915_wedge_me(struct work_struct *work)
+{
+       struct i915_wedge_me *w = container_of(work, typeof(*w), work.work);
+
+       dev_err(w->i915->drm.dev,
+               "%s timed out, cancelling all in-flight rendering.\n",
+               w->name);
+       i915_gem_set_wedged(w->i915);
+}
+
+void __i915_init_wedge(struct i915_wedge_me *w,
+                      struct drm_i915_private *i915,
+                      long timeout,
+                      const char *name)
+{
+       w->i915 = i915;
+       w->name = name;
+
+       INIT_DELAYED_WORK_ONSTACK(&w->work, i915_wedge_me);
+       schedule_delayed_work(&w->work, timeout);
+}
+
+void __i915_fini_wedge(struct i915_wedge_me *w)
+{
+       cancel_delayed_work_sync(&w->work);
+       destroy_delayed_work_on_stack(&w->work);
+       w->i915 = NULL;
+}
diff --git a/drivers/gpu/drm/i915/i915_reset.h b/drivers/gpu/drm/i915/i915_reset.h
new file mode 100644 (file)
index 0000000..f2d347f
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2008-2018 Intel Corporation
+ */
+
+#ifndef I915_RESET_H
+#define I915_RESET_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_engine_cs;
+struct intel_guc;
+
+__printf(4, 5)
+void i915_handle_error(struct drm_i915_private *i915,
+                      u32 engine_mask,
+                      unsigned long flags,
+                      const char *fmt, ...);
+#define I915_ERROR_CAPTURE BIT(0)
+
+void i915_clear_error_registers(struct drm_i915_private *i915);
+
+void i915_reset(struct drm_i915_private *i915,
+               unsigned int stalled_mask,
+               const char *reason);
+int i915_reset_engine(struct intel_engine_cs *engine,
+                     const char *reason);
+
+void i915_reset_request(struct i915_request *rq, bool guilty);
+bool i915_reset_flush(struct drm_i915_private *i915);
+
+bool intel_has_gpu_reset(struct drm_i915_private *i915);
+bool intel_has_reset_engine(struct drm_i915_private *i915);
+
+int intel_gpu_reset(struct drm_i915_private *i915, u32 engine_mask);
+
+int intel_reset_guc(struct drm_i915_private *i915);
+
+struct i915_wedge_me {
+       struct delayed_work work;
+       struct drm_i915_private *i915;
+       const char *name;
+};
+
+void __i915_init_wedge(struct i915_wedge_me *w,
+                      struct drm_i915_private *i915,
+                      long timeout,
+                      const char *name);
+void __i915_fini_wedge(struct i915_wedge_me *w);
+
+#define i915_wedge_on_timeout(W, DEV, TIMEOUT)                         \
+       for (__i915_init_wedge((W), (DEV), (TIMEOUT), __func__);        \
+            (W)->i915;                                                 \
+            __i915_fini_wedge((W)))
+
+#endif /* I915_RESET_H */
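
i915_wedge_on_timeout() uses the for-loop scoped-guard idiom: the init expression arms the watchdog, the condition limits the body to a single pass, and the increment expression disarms it on exit. The shape of the trick, reduced to a toy guard (hypothetical names):

#include <stdio.h>

struct guard { int armed; };

static void guard_init(struct guard *g) { g->armed = 1; puts("armed"); }
static void guard_fini(struct guard *g) { g->armed = 0; puts("disarmed"); }

/* init once; body runs while armed; fini runs as the "increment" */
#define with_guard(g) \
        for (guard_init(g); (g)->armed; guard_fini(g))

int main(void)
{
        struct guard g;

        with_guard(&g)
                puts("protected section"); /* runs exactly once */
        return 0;
}
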
index 340faea6c08a21fd8bebdc7c715d29a7ca53c8f5..d01683167c7747e58ea2d85e9dcf2cbc45fb0c3e 100644 (file)
@@ -127,8 +127,7 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
        return rb_entry(rb, struct i915_priolist, node);
 }
 
-static void assert_priolists(struct intel_engine_execlists * const execlists,
-                            long queue_priority)
+static void assert_priolists(struct intel_engine_execlists * const execlists)
 {
        struct rb_node *rb;
        long last_prio, i;
@@ -139,7 +138,7 @@ static void assert_priolists(struct intel_engine_execlists * const execlists,
        GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
                   rb_first(&execlists->queue.rb_root));
 
-       last_prio = (queue_priority >> I915_USER_PRIORITY_SHIFT) + 1;
+       last_prio = (INT_MAX >> I915_USER_PRIORITY_SHIFT) + 1;
        for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
                const struct i915_priolist *p = to_priolist(rb);
 
@@ -166,7 +165,7 @@ i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
        int idx, i;
 
        lockdep_assert_held(&engine->timeline.lock);
-       assert_priolists(execlists, INT_MAX);
+       assert_priolists(execlists);
 
        /* buckets sorted from highest [in slot 0] to lowest priority */
        idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
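
The bucket computation relies on the priority value carrying two fields: the user priority in the high bits and an internal bump in the low bits, with buckets stored highest-first. A worked sketch with illustrative shift/count values:

#include <stdio.h>

#define PRIORITY_SHIFT 2                      /* illustrative */
#define PRIORITY_COUNT (1 << PRIORITY_SHIFT)
#define PRIORITY_MASK  (PRIORITY_COUNT - 1)

int main(void)
{
        int prio = (5 << PRIORITY_SHIFT) | 2; /* user prio 5, internal bump 2 */

        /* buckets sorted from highest [slot 0] to lowest priority */
        printf("user=%d bucket=%d\n",
               prio >> PRIORITY_SHIFT,
               PRIORITY_COUNT - (prio & PRIORITY_MASK) - 1); /* user=5 bucket=1 */
        return 0;
}
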
@@ -239,6 +238,18 @@ sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
        return engine;
 }
 
+static bool inflight(const struct i915_request *rq,
+                    const struct intel_engine_cs *engine)
+{
+       const struct i915_request *active;
+
+       if (!i915_request_is_active(rq))
+               return false;
+
+       active = port_request(engine->execlists.port);
+       return active->hw_context == rq->hw_context;
+}
+
 static void __i915_schedule(struct i915_request *rq,
                            const struct i915_sched_attr *attr)
 {
@@ -328,6 +339,7 @@ static void __i915_schedule(struct i915_request *rq,
                INIT_LIST_HEAD(&dep->dfs_link);
 
                engine = sched_lock_engine(node, engine);
+               lockdep_assert_held(&engine->timeline.lock);
 
                /* Recheck after acquiring the engine->timeline.lock */
                if (prio <= node->attr.priority || node_signaled(node))
@@ -353,20 +365,19 @@ static void __i915_schedule(struct i915_request *rq,
                                continue;
                }
 
-               if (prio <= engine->execlists.queue_priority)
+               if (prio <= engine->execlists.queue_priority_hint)
                        continue;
 
+               engine->execlists.queue_priority_hint = prio;
+
                /*
                 * If we are already the currently executing context, don't
                 * bother evaluating if we should preempt ourselves.
                 */
-               if (node_to_request(node)->global_seqno &&
-                   i915_seqno_passed(port_request(engine->execlists.port)->global_seqno,
-                                     node_to_request(node)->global_seqno))
+               if (inflight(node_to_request(node), engine))
                        continue;
 
                /* Defer (tasklet) submission until after all of our updates. */
-               engine->execlists.queue_priority = prio;
                tasklet_hi_schedule(&engine->execlists.tasklet);
        }
 
index a73472dd12fd926da14d45e35af9a4ac70b97a1e..207e21b478f21afe1165d863f082acd092a7cbf5 100644 (file)
@@ -31,6 +31,7 @@ struct i915_selftest {
        unsigned long timeout_jiffies;
        unsigned int timeout_ms;
        unsigned int random_seed;
+       char *filter;
        int mock;
        int live;
 };
index 8f3aa4dc0c98596a3b0443e493be84403d120bc3..d2f2a9c2fabd67d206ff5191d0af8a149d1088eb 100644 (file)
@@ -24,7 +24,6 @@
  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "intel_drv.h"
 #include "i915_reg.h"
@@ -65,7 +64,7 @@ int i915_save_state(struct drm_i915_private *dev_priv)
 
        i915_save_display(dev_priv);
 
-       if (IS_GEN4(dev_priv))
+       if (IS_GEN(dev_priv, 4))
                pci_read_config_word(pdev, GCDGMBUS,
                                     &dev_priv->regfile.saveGCDGMBUS);
 
@@ -77,17 +76,17 @@ int i915_save_state(struct drm_i915_private *dev_priv)
        dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
 
        /* Scratch space */
-       if (IS_GEN2(dev_priv) && IS_MOBILE(dev_priv)) {
+       if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
                for (i = 0; i < 7; i++) {
                        dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
                        dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
                }
                for (i = 0; i < 3; i++)
                        dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
-       } else if (IS_GEN2(dev_priv)) {
+       } else if (IS_GEN(dev_priv, 2)) {
                for (i = 0; i < 7; i++)
                        dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
-       } else if (HAS_GMCH_DISPLAY(dev_priv)) {
+       } else if (HAS_GMCH(dev_priv)) {
                for (i = 0; i < 16; i++) {
                        dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
                        dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
@@ -108,7 +107,7 @@ int i915_restore_state(struct drm_i915_private *dev_priv)
 
        mutex_lock(&dev_priv->drm.struct_mutex);
 
-       if (IS_GEN4(dev_priv))
+       if (IS_GEN(dev_priv, 4))
                pci_write_config_word(pdev, GCDGMBUS,
                                      dev_priv->regfile.saveGCDGMBUS);
        i915_restore_display(dev_priv);
@@ -122,17 +121,17 @@ int i915_restore_state(struct drm_i915_private *dev_priv)
        I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
 
        /* Scratch space */
-       if (IS_GEN2(dev_priv) && IS_MOBILE(dev_priv)) {
+       if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
                for (i = 0; i < 7; i++) {
                        I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]);
                        I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
                }
                for (i = 0; i < 3; i++)
                        I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
-       } else if (IS_GEN2(dev_priv)) {
+       } else if (IS_GEN(dev_priv, 2)) {
                for (i = 0; i < 7; i++)
                        I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
-       } else if (HAS_GMCH_DISPLAY(dev_priv)) {
+       } else if (HAS_GMCH(dev_priv)) {
                for (i = 0; i < 16; i++) {
                        I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]);
                        I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
index 535caebd9813af5d82701b1d7e47b987563cd608..41313005af42517873fa29d94016c17cf05cd317 100644 (file)
@@ -42,11 +42,11 @@ static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
 static u32 calc_residency(struct drm_i915_private *dev_priv,
                          i915_reg_t reg)
 {
-       u64 res;
+       intel_wakeref_t wakeref;
+       u64 res = 0;
 
-       intel_runtime_pm_get(dev_priv);
-       res = intel_rc6_residency_us(dev_priv, reg);
-       intel_runtime_pm_put(dev_priv);
+       with_intel_runtime_pm(dev_priv, wakeref)
+               res = intel_rc6_residency_us(dev_priv, reg);
 
        return DIV_ROUND_CLOSEST_ULL(res, 1000);
 }
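
with_intel_runtime_pm() scopes the wakeref to the statement it guards, so the matching put cannot be forgotten on an early exit. One plausible shape for such a macro, shown with hypothetical pm_get()/pm_put() helpers rather than the driver's real definitions:

/* Sketch only: a for-loop scope macro in the style of with_intel_runtime_pm. */
struct pm_dev;				/* opaque stand-in for the device */
typedef unsigned long pm_wakeref_t;	/* nonzero cookie while held */

pm_wakeref_t pm_get(struct pm_dev *dev);
void pm_put(struct pm_dev *dev, pm_wakeref_t wf);

#define with_pm(dev, wf) \
	for ((wf) = pm_get(dev); (wf); pm_put((dev), (wf)), (wf) = 0)

/* The body runs once with the reference held, then the loop releases it. */
static unsigned long read_counter(struct pm_dev *dev,
				  unsigned long (*read)(struct pm_dev *))
{
	pm_wakeref_t wf;
	unsigned long val = 0;	/* default if the reference is unavailable */

	with_pm(dev, wf)
		val = read(dev);

	return val;
}
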
@@ -258,9 +258,10 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
                                    struct device_attribute *attr, char *buf)
 {
        struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+       intel_wakeref_t wakeref;
        int ret;
 
-       intel_runtime_pm_get(dev_priv);
+       wakeref = intel_runtime_pm_get(dev_priv);
 
        mutex_lock(&dev_priv->pcu_lock);
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
@@ -274,7 +275,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
        }
        mutex_unlock(&dev_priv->pcu_lock);
 
-       intel_runtime_pm_put(dev_priv);
+       intel_runtime_pm_put(dev_priv, wakeref);
 
        return snprintf(buf, PAGE_SIZE, "%d\n", ret);
 }
@@ -354,6 +355,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 {
        struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
+       intel_wakeref_t wakeref;
        u32 val;
        ssize_t ret;
 
@@ -361,7 +363,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
        if (ret)
                return ret;
 
-       intel_runtime_pm_get(dev_priv);
+       wakeref = intel_runtime_pm_get(dev_priv);
 
        mutex_lock(&dev_priv->pcu_lock);
 
@@ -371,7 +373,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
            val > rps->max_freq ||
            val < rps->min_freq_softlimit) {
                mutex_unlock(&dev_priv->pcu_lock);
-               intel_runtime_pm_put(dev_priv);
+               intel_runtime_pm_put(dev_priv, wakeref);
                return -EINVAL;
        }
 
@@ -392,7 +394,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 
        mutex_unlock(&dev_priv->pcu_lock);
 
-       intel_runtime_pm_put(dev_priv);
+       intel_runtime_pm_put(dev_priv, wakeref);
 
        return ret ?: count;
 }
@@ -412,6 +414,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 {
        struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
+       intel_wakeref_t wakeref;
        u32 val;
        ssize_t ret;
 
@@ -419,7 +422,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
        if (ret)
                return ret;
 
-       intel_runtime_pm_get(dev_priv);
+       wakeref = intel_runtime_pm_get(dev_priv);
 
        mutex_lock(&dev_priv->pcu_lock);
 
@@ -429,7 +432,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
            val > rps->max_freq ||
            val > rps->max_freq_softlimit) {
                mutex_unlock(&dev_priv->pcu_lock);
-               intel_runtime_pm_put(dev_priv);
+               intel_runtime_pm_put(dev_priv, wakeref);
                return -EINVAL;
        }
 
@@ -446,7 +449,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 
        mutex_unlock(&dev_priv->pcu_lock);
 
-       intel_runtime_pm_put(dev_priv);
+       intel_runtime_pm_put(dev_priv, wakeref);
 
        return ret ?: count;
 }
@@ -521,7 +524,9 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
        ssize_t ret;
 
        gpu = i915_first_error_state(i915);
-       if (gpu) {
+       if (IS_ERR(gpu)) {
+               ret = PTR_ERR(gpu);
+       } else if (gpu) {
                ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count);
                i915_gpu_state_put(gpu);
        } else {
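
i915_first_error_state() can now return three distinct things: an errno encoded as a pointer (for example while the GPU is wedged), NULL when nothing was captured, or a valid snapshot. The ERR_PTR convention lets all three share one return value; a sketch of consuming such a tri-state pointer (gpu_state is an illustrative stand-in):

#include <linux/err.h>

struct gpu_state;	/* illustrative; stands in for i915_gpu_state */

static long consume(struct gpu_state *gpu)
{
	if (IS_ERR(gpu))		/* pointer encodes -MAX_ERRNO..-1 */
		return PTR_ERR(gpu);
	if (!gpu)			/* nothing captured */
		return 0;
	/* ... valid pointer: copy it out, then drop the reference ... */
	return 1;
}
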
index 4667cc08c416c16b8c9f8efbfb723d5064e75457..b2202d2e58a26341ab57cdc0fc3c86247d653ac4 100644 (file)
 #include "i915_timeline.h"
 #include "i915_syncmap.h"
 
-void i915_timeline_init(struct drm_i915_private *i915,
-                       struct i915_timeline *timeline,
-                       const char *name)
+struct i915_timeline_hwsp {
+       struct i915_vma *vma;
+       struct list_head free_link;
+       u64 free_bitmap;
+};
+
+static inline struct i915_timeline_hwsp *
+i915_timeline_hwsp(const struct i915_timeline *tl)
+{
+       return tl->hwsp_ggtt->private;
+}
+
+static struct i915_vma *__hwsp_alloc(struct drm_i915_private *i915)
+{
+       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
+
+       obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+       if (IS_ERR(obj))
+               return ERR_CAST(obj);
+
+       i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
+
+       vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+       if (IS_ERR(vma))
+               i915_gem_object_put(obj);
+
+       return vma;
+}
+
+static struct i915_vma *
+hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
 {
-       lockdep_assert_held(&i915->drm.struct_mutex);
+       struct drm_i915_private *i915 = timeline->i915;
+       struct i915_gt_timelines *gt = &i915->gt.timelines;
+       struct i915_timeline_hwsp *hwsp;
+
+       BUILD_BUG_ON(BITS_PER_TYPE(u64) * CACHELINE_BYTES > PAGE_SIZE);
+
+       spin_lock(&gt->hwsp_lock);
+
+       /* hwsp_free_list only contains HWSP that have available cachelines */
+       hwsp = list_first_entry_or_null(&gt->hwsp_free_list,
+                                       typeof(*hwsp), free_link);
+       if (!hwsp) {
+               struct i915_vma *vma;
+
+               spin_unlock(&gt->hwsp_lock);
+
+               hwsp = kmalloc(sizeof(*hwsp), GFP_KERNEL);
+               if (!hwsp)
+                       return ERR_PTR(-ENOMEM);
+
+               vma = __hwsp_alloc(i915);
+               if (IS_ERR(vma)) {
+                       kfree(hwsp);
+                       return vma;
+               }
+
+               vma->private = hwsp;
+               hwsp->vma = vma;
+               hwsp->free_bitmap = ~0ull;
+
+               spin_lock(&gt->hwsp_lock);
+               list_add(&hwsp->free_link, &gt->hwsp_free_list);
+       }
+
+       GEM_BUG_ON(!hwsp->free_bitmap);
+       *cacheline = __ffs64(hwsp->free_bitmap);
+       hwsp->free_bitmap &= ~BIT_ULL(*cacheline);
+       if (!hwsp->free_bitmap)
+               list_del(&hwsp->free_link);
+
+       spin_unlock(&gt->hwsp_lock);
+
+       GEM_BUG_ON(hwsp->vma->private != hwsp);
+       return hwsp->vma;
+}
+
+static void hwsp_free(struct i915_timeline *timeline)
+{
+       struct i915_gt_timelines *gt = &timeline->i915->gt.timelines;
+       struct i915_timeline_hwsp *hwsp;
+
+       hwsp = i915_timeline_hwsp(timeline);
+       if (!hwsp) /* leave global HWSP alone! */
+               return;
+
+       spin_lock(&gt->hwsp_lock);
+
+       /* As a cacheline becomes available, publish the HWSP on the freelist */
+       if (!hwsp->free_bitmap)
+               list_add_tail(&hwsp->free_link, &gt->hwsp_free_list);
+
+       hwsp->free_bitmap |= BIT_ULL(timeline->hwsp_offset / CACHELINE_BYTES);
+
+       /* And if no one is left using it, give the page back to the system */
+       if (hwsp->free_bitmap == ~0ull) {
+               i915_vma_put(hwsp->vma);
+               list_del(&hwsp->free_link);
+               kfree(hwsp);
+       }
+
+       spin_unlock(&gt->hwsp_lock);
+}
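
Each page-sized HWSP is carved into 64 cachelines tracked by a single u64 bitmap: __ffs64() picks the lowest free slot on allocation, and freeing sets the bit back, returning the page to the system once the bitmap reads ~0ull again. The same suballocation arithmetic, self-contained:

#include <stdint.h>

#define SLOT_BYTES	64u	/* one cacheline */
#define PAGE_BYTES	4096u	/* 64 slots * 64 bytes */

/* Claim the lowest free cacheline; returns its byte offset, or -1 if full. */
static int slot_alloc(uint64_t *free_bitmap)
{
	unsigned int slot;

	if (!*free_bitmap)
		return -1;

	slot = (unsigned int)__builtin_ctzll(*free_bitmap); /* cf. __ffs64() */
	*free_bitmap &= ~(1ull << slot);
	return (int)(slot * SLOT_BYTES);
}

/* Return a cacheline; a bitmap of ~0ull means the whole page is free again. */
static void slot_free(uint64_t *free_bitmap, unsigned int offset)
{
	*free_bitmap |= 1ull << (offset / SLOT_BYTES);
}
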
+
+int i915_timeline_init(struct drm_i915_private *i915,
+                      struct i915_timeline *timeline,
+                      const char *name,
+                      struct i915_vma *hwsp)
+{
+       void *vaddr;
 
        /*
         * Ideally we want a set of engines on a single leaf as we expect
         * to mostly be tracking synchronisation between engines. It is not
         * a huge issue if this is not the case, but we may want to mitigate
         * any page crossing penalties if they become an issue.
+        *
+        * Called during early_init before we know how many engines there are.
         */
        BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES);
 
+       timeline->i915 = i915;
        timeline->name = name;
+       timeline->pin_count = 0;
+       timeline->has_initial_breadcrumb = !hwsp;
 
-       list_add(&timeline->link, &i915->gt.timelines);
+       timeline->hwsp_offset = I915_GEM_HWS_SEQNO_ADDR;
+       if (!hwsp) {
+               unsigned int cacheline;
+
+               hwsp = hwsp_alloc(timeline, &cacheline);
+               if (IS_ERR(hwsp))
+                       return PTR_ERR(hwsp);
+
+               timeline->hwsp_offset = cacheline * CACHELINE_BYTES;
+       }
+       timeline->hwsp_ggtt = i915_vma_get(hwsp);
+
+       vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB);
+       if (IS_ERR(vaddr)) {
+               hwsp_free(timeline);
+               i915_vma_put(hwsp);
+               return PTR_ERR(vaddr);
+       }
 
-       /* Called during early_init before we know how many engines there are */
+       timeline->hwsp_seqno =
+               memset(vaddr + timeline->hwsp_offset, 0, CACHELINE_BYTES);
 
        timeline->fence_context = dma_fence_context_alloc(1);
 
        spin_lock_init(&timeline->lock);
 
-       init_request_active(&timeline->last_request, NULL);
+       INIT_ACTIVE_REQUEST(&timeline->barrier);
+       INIT_ACTIVE_REQUEST(&timeline->last_request);
        INIT_LIST_HEAD(&timeline->requests);
 
        i915_syncmap_init(&timeline->sync);
+
+       return 0;
+}
+
+void i915_timelines_init(struct drm_i915_private *i915)
+{
+       struct i915_gt_timelines *gt = &i915->gt.timelines;
+
+       mutex_init(&gt->mutex);
+       INIT_LIST_HEAD(&gt->active_list);
+
+       spin_lock_init(&gt->hwsp_lock);
+       INIT_LIST_HEAD(&gt->hwsp_free_list);
+
+       /* via i915_gem_wait_for_idle() */
+       i915_gem_shrinker_taints_mutex(i915, &gt->mutex);
+}
+
+static void timeline_add_to_active(struct i915_timeline *tl)
+{
+       struct i915_gt_timelines *gt = &tl->i915->gt.timelines;
+
+       mutex_lock(&gt->mutex);
+       list_add(&tl->link, &gt->active_list);
+       mutex_unlock(&gt->mutex);
+}
+
+static void timeline_remove_from_active(struct i915_timeline *tl)
+{
+       struct i915_gt_timelines *gt = &tl->i915->gt.timelines;
+
+       mutex_lock(&gt->mutex);
+       list_del(&tl->link);
+       mutex_unlock(&gt->mutex);
 }
 
 /**
@@ -51,11 +216,11 @@ void i915_timeline_init(struct drm_i915_private *i915,
  */
 void i915_timelines_park(struct drm_i915_private *i915)
 {
+       struct i915_gt_timelines *gt = &i915->gt.timelines;
        struct i915_timeline *timeline;
 
-       lockdep_assert_held(&i915->drm.struct_mutex);
-
-       list_for_each_entry(timeline, &i915->gt.timelines, link) {
+       mutex_lock(&gt->mutex);
+       list_for_each_entry(timeline, &gt->active_list, link) {
                /*
                 * All known fences are completed so we can scrap
                 * the current sync point tracking and start afresh,
@@ -64,32 +229,88 @@ void i915_timelines_park(struct drm_i915_private *i915)
                 */
                i915_syncmap_free(&timeline->sync);
        }
+       mutex_unlock(&gt->mutex);
 }
 
 void i915_timeline_fini(struct i915_timeline *timeline)
 {
+       GEM_BUG_ON(timeline->pin_count);
        GEM_BUG_ON(!list_empty(&timeline->requests));
+       GEM_BUG_ON(i915_active_request_isset(&timeline->barrier));
 
        i915_syncmap_free(&timeline->sync);
+       hwsp_free(timeline);
 
-       list_del(&timeline->link);
+       i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);
+       i915_vma_put(timeline->hwsp_ggtt);
 }
 
 struct i915_timeline *
-i915_timeline_create(struct drm_i915_private *i915, const char *name)
+i915_timeline_create(struct drm_i915_private *i915,
+                    const char *name,
+                    struct i915_vma *global_hwsp)
 {
        struct i915_timeline *timeline;
+       int err;
 
        timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
        if (!timeline)
                return ERR_PTR(-ENOMEM);
 
-       i915_timeline_init(i915, timeline, name);
+       err = i915_timeline_init(i915, timeline, name, global_hwsp);
+       if (err) {
+               kfree(timeline);
+               return ERR_PTR(err);
+       }
+
        kref_init(&timeline->kref);
 
        return timeline;
 }
 
+int i915_timeline_pin(struct i915_timeline *tl)
+{
+       int err;
+
+       if (tl->pin_count++)
+               return 0;
+       GEM_BUG_ON(!tl->pin_count);
+
+       err = i915_vma_pin(tl->hwsp_ggtt, 0, 0, PIN_GLOBAL | PIN_HIGH);
+       if (err)
+               goto unpin;
+
+       tl->hwsp_offset =
+               i915_ggtt_offset(tl->hwsp_ggtt) +
+               offset_in_page(tl->hwsp_offset);
+
+       timeline_add_to_active(tl);
+
+       return 0;
+
+unpin:
+       tl->pin_count = 0;
+       return err;
+}
+
+void i915_timeline_unpin(struct i915_timeline *tl)
+{
+       GEM_BUG_ON(!tl->pin_count);
+       if (--tl->pin_count)
+               return;
+
+       timeline_remove_from_active(tl);
+
+       /*
+        * Since this timeline is idle, all barriers upon which we were waiting
+        * must also be complete and so we can discard the last used barriers
+        * without loss of information.
+        */
+       i915_syncmap_free(&tl->sync);
+
+       __i915_vma_unpin(tl->hwsp_ggtt);
+}
+
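
i915_timeline_pin() folds the fast path and the reference count into one post-increment: only the 0-to-1 transition pins the backing vma and joins the active list, and only the final unpin undoes it. The idiom in isolation, with first_pin/last_unpin as stand-ins for the real work:

/* Minimal sketch of the pin-count idiom used by i915_timeline_pin/unpin. */
struct pinnable {
	unsigned int pin_count;
};

static int pin(struct pinnable *p, int (*first_pin)(struct pinnable *))
{
	int err;

	if (p->pin_count++)
		return 0;		/* already pinned: just count the user */

	err = first_pin(p);		/* expensive setup only on 0 -> 1 */
	if (err)
		p->pin_count = 0;	/* roll back so a retry starts clean */
	return err;
}

static void unpin(struct pinnable *p, void (*last_unpin)(struct pinnable *))
{
	if (--p->pin_count)
		return;			/* other users remain */

	last_unpin(p);			/* teardown only on 1 -> 0 */
}
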
 void __i915_timeline_free(struct kref *kref)
 {
        struct i915_timeline *timeline =
@@ -99,6 +320,16 @@ void __i915_timeline_free(struct kref *kref)
        kfree(timeline);
 }
 
+void i915_timelines_fini(struct drm_i915_private *i915)
+{
+       struct i915_gt_timelines *gt = &i915->gt.timelines;
+
+       GEM_BUG_ON(!list_empty(&gt->active_list));
+       GEM_BUG_ON(!list_empty(&gt->hwsp_free_list));
+
+       mutex_destroy(&gt->mutex);
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/mock_timeline.c"
 #include "selftests/i915_timeline.c"
index ebd71b487220aec95a2bfdeb1c924f0836ae64c1..7bec7d2e45bfa242d30d8a76f38b99ae44226903 100644 (file)
 #include <linux/list.h>
 #include <linux/kref.h>
 
+#include "i915_active.h"
 #include "i915_request.h"
 #include "i915_syncmap.h"
 #include "i915_utils.h"
 
+struct i915_vma;
+struct i915_timeline_hwsp;
+
 struct i915_timeline {
        u64 fence_context;
        u32 seqno;
@@ -40,6 +44,13 @@ struct i915_timeline {
 #define TIMELINE_CLIENT 0 /* default subclass */
 #define TIMELINE_ENGINE 1
 
+       unsigned int pin_count;
+       const u32 *hwsp_seqno;
+       struct i915_vma *hwsp_ggtt;
+       u32 hwsp_offset;
+
+       bool has_initial_breadcrumb;
+
        /**
         * List of breadcrumbs associated with GPU requests currently
         * outstanding.
@@ -48,10 +59,10 @@ struct i915_timeline {
 
        /* Contains an RCU guarded pointer to the last request. No reference is
         * held to the request, users must carefully acquire a reference to
-        * the request using i915_gem_active_get_request_rcu(), or hold the
+        * the request using i915_active_request_get_request_rcu(), or hold the
         * struct_mutex.
         */
-       struct i915_gem_active last_request;
+       struct i915_active_request last_request;
 
        /**
         * We track the most recent seqno that we wait on in every context so
@@ -63,24 +74,28 @@ struct i915_timeline {
         * redundant and we can discard it without loss of generality.
         */
        struct i915_syncmap *sync;
+
        /**
-        * Separately to the inter-context seqno map above, we track the last
-        * barrier (e.g. semaphore wait) to the global engine timelines. Note
-        * that this tracks global_seqno rather than the context.seqno, and
-        * so it is subject to the limitations of hw wraparound and that we
-        * may need to revoke global_seqno (on pre-emption).
+        * Barrier provides the ability to serialize ordering between different
+        * timelines.
+        *
+        * Users can call i915_timeline_set_barrier which will make all
+        * subsequent submissions to this timeline be executed only after the
+        * barrier has been completed.
         */
-       u32 global_sync[I915_NUM_ENGINES];
+       struct i915_active_request barrier;
 
        struct list_head link;
        const char *name;
+       struct drm_i915_private *i915;
 
        struct kref kref;
 };
 
-void i915_timeline_init(struct drm_i915_private *i915,
-                       struct i915_timeline *tl,
-                       const char *name);
+int i915_timeline_init(struct drm_i915_private *i915,
+                      struct i915_timeline *tl,
+                      const char *name,
+                      struct i915_vma *hwsp);
 void i915_timeline_fini(struct i915_timeline *tl);
 
 static inline void
@@ -103,7 +118,9 @@ i915_timeline_set_subclass(struct i915_timeline *timeline,
 }
 
 struct i915_timeline *
-i915_timeline_create(struct drm_i915_private *i915, const char *name);
+i915_timeline_create(struct drm_i915_private *i915,
+                    const char *name,
+                    struct i915_vma *global_hwsp);
 
 static inline struct i915_timeline *
 i915_timeline_get(struct i915_timeline *timeline)
@@ -142,6 +159,26 @@ static inline bool i915_timeline_sync_is_later(struct i915_timeline *tl,
        return __i915_timeline_sync_is_later(tl, fence->context, fence->seqno);
 }
 
+int i915_timeline_pin(struct i915_timeline *tl);
+void i915_timeline_unpin(struct i915_timeline *tl);
+
+void i915_timelines_init(struct drm_i915_private *i915);
 void i915_timelines_park(struct drm_i915_private *i915);
+void i915_timelines_fini(struct drm_i915_private *i915);
+
+/**
+ * i915_timeline_set_barrier - orders submission between different timelines
+ * @timeline: timeline to set the barrier on
+ * @rq: request after which new submissions can proceed
+ *
+ * Sets the passed in request as the serialization point for all subsequent
+ * submissions on @timeline. Subsequent requests will not be submitted to GPU
+ * until the barrier has been completed.
+ */
+static inline int
+i915_timeline_set_barrier(struct i915_timeline *tl, struct i915_request *rq)
+{
+       return i915_active_request_set(&tl->barrier, rq);
+}
 
 #endif
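
Per the kernel-doc above, the barrier is simply the timeline's remembered serialization request: setting it succeeds immediately, and ordering is enforced later when new submissions first await that request. A generic sketch under that assumption (request and await are illustrative stand-ins):

struct request;		/* illustrative stand-in for i915_request */

struct barrier_slot {
	struct request *rq;	/* last barrier set, if any */
};

static int barrier_set(struct barrier_slot *b, struct request *rq)
{
	b->rq = rq;	/* the real code tracks this as an active request */
	return 0;
}

/* Before submitting new work, order it after the current barrier, if set. */
static int barrier_apply(struct barrier_slot *b, struct request *rq,
			 int (*await)(struct request *waiter,
				      struct request *signal))
{
	if (!b->rq)
		return 0;
	return await(rq, b->rq);
}
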
index b50c6b829715e220c9f3edede3dfa0e83497a804..eab313c3163c91e97e6d1e4449e12d963ca6773f 100644 (file)
@@ -6,7 +6,8 @@
 #include <linux/types.h>
 #include <linux/tracepoint.h>
 
-#include <drm/drmP.h>
+#include <drm/drm_drv.h>
+
 #include "i915_drv.h"
 #include "intel_drv.h"
 #include "intel_ringbuffer.h"
@@ -585,35 +586,6 @@ TRACE_EVENT(i915_gem_evict_vm,
            TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
 );
 
-TRACE_EVENT(i915_gem_ring_sync_to,
-           TP_PROTO(struct i915_request *to, struct i915_request *from),
-           TP_ARGS(to, from),
-
-           TP_STRUCT__entry(
-                            __field(u32, dev)
-                            __field(u32, from_class)
-                            __field(u32, from_instance)
-                            __field(u32, to_class)
-                            __field(u32, to_instance)
-                            __field(u32, seqno)
-                            ),
-
-           TP_fast_assign(
-                          __entry->dev = from->i915->drm.primary->index;
-                          __entry->from_class = from->engine->uabi_class;
-                          __entry->from_instance = from->engine->instance;
-                          __entry->to_class = to->engine->uabi_class;
-                          __entry->to_instance = to->engine->instance;
-                          __entry->seqno = from->global_seqno;
-                          ),
-
-           TP_printk("dev=%u, sync-from=%u:%u, sync-to=%u:%u, seqno=%u",
-                     __entry->dev,
-                     __entry->from_class, __entry->from_instance,
-                     __entry->to_class, __entry->to_instance,
-                     __entry->seqno)
-);
-
 TRACE_EVENT(i915_request_queue,
            TP_PROTO(struct i915_request *rq, u32 flags),
            TP_ARGS(rq, flags),
@@ -780,31 +752,6 @@ trace_i915_request_out(struct i915_request *rq)
 #endif
 #endif
 
-TRACE_EVENT(intel_engine_notify,
-           TP_PROTO(struct intel_engine_cs *engine, bool waiters),
-           TP_ARGS(engine, waiters),
-
-           TP_STRUCT__entry(
-                            __field(u32, dev)
-                            __field(u16, class)
-                            __field(u16, instance)
-                            __field(u32, seqno)
-                            __field(bool, waiters)
-                            ),
-
-           TP_fast_assign(
-                          __entry->dev = engine->i915->drm.primary->index;
-                          __entry->class = engine->uabi_class;
-                          __entry->instance = engine->instance;
-                          __entry->seqno = intel_engine_get_seqno(engine);
-                          __entry->waiters = waiters;
-                          ),
-
-           TP_printk("dev=%u, engine=%u:%u, seqno=%u, waiters=%u",
-                     __entry->dev, __entry->class, __entry->instance,
-                     __entry->seqno, __entry->waiters)
-);
-
 DEFINE_EVENT(i915_request, i915_request_retire,
            TP_PROTO(struct i915_request *rq),
            TP_ARGS(rq)
index 5b4d78cdb4ca32c4162322b4750e2dec80fe99d1..b713bed20c3880c088a45c49e8a787c786f6e971 100644 (file)
@@ -63,24 +63,22 @@ static void vma_print_allocator(struct i915_vma *vma, const char *reason)
 
 #endif
 
-struct i915_vma_active {
-       struct i915_gem_active base;
-       struct i915_vma *vma;
-       struct rb_node node;
-       u64 timeline;
-};
-
-static void
-__i915_vma_retire(struct i915_vma *vma, struct i915_request *rq)
+static void obj_bump_mru(struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_gem_object *obj = vma->obj;
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
 
-       GEM_BUG_ON(!i915_vma_is_active(vma));
-       if (--vma->active_count)
-               return;
+       spin_lock(&i915->mm.obj_lock);
+       if (obj->bind_count)
+               list_move_tail(&obj->mm.link, &i915->mm.bound_list);
+       spin_unlock(&i915->mm.obj_lock);
 
-       GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
-       list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+       obj->mm.dirty = true; /* be paranoid  */
+}
+
+static void __i915_vma_retire(struct i915_active *ref)
+{
+       struct i915_vma *vma = container_of(ref, typeof(*vma), active);
+       struct drm_i915_gem_object *obj = vma->obj;
 
        GEM_BUG_ON(!i915_gem_object_is_active(obj));
        if (--obj->active_count)
@@ -93,16 +91,12 @@ __i915_vma_retire(struct i915_vma *vma, struct i915_request *rq)
                reservation_object_unlock(obj->resv);
        }
 
-       /* Bump our place on the bound list to keep it roughly in LRU order
+       /*
+        * Bump our place on the bound list to keep it roughly in LRU order
         * so that we don't steal from recently used but inactive objects
         * (unless we are forced to ofc!)
         */
-       spin_lock(&rq->i915->mm.obj_lock);
-       if (obj->bind_count)
-               list_move_tail(&obj->mm.link, &rq->i915->mm.bound_list);
-       spin_unlock(&rq->i915->mm.obj_lock);
-
-       obj->mm.dirty = true; /* be paranoid  */
+       obj_bump_mru(obj);
 
        if (i915_gem_object_has_active_reference(obj)) {
                i915_gem_object_clear_active_reference(obj);
@@ -110,21 +104,6 @@ __i915_vma_retire(struct i915_vma *vma, struct i915_request *rq)
        }
 }
 
-static void
-i915_vma_retire(struct i915_gem_active *base, struct i915_request *rq)
-{
-       struct i915_vma_active *active =
-               container_of(base, typeof(*active), base);
-
-       __i915_vma_retire(active->vma, rq);
-}
-
-static void
-i915_vma_last_retire(struct i915_gem_active *base, struct i915_request *rq)
-{
-       __i915_vma_retire(container_of(base, struct i915_vma, last_active), rq);
-}
-
 static struct i915_vma *
 vma_create(struct drm_i915_gem_object *obj,
           struct i915_address_space *vm,
@@ -140,10 +119,9 @@ vma_create(struct drm_i915_gem_object *obj,
        if (vma == NULL)
                return ERR_PTR(-ENOMEM);
 
-       vma->active = RB_ROOT;
+       i915_active_init(vm->i915, &vma->active, __i915_vma_retire);
+       INIT_ACTIVE_REQUEST(&vma->last_fence);
 
-       init_request_active(&vma->last_active, i915_vma_last_retire);
-       init_request_active(&vma->last_fence, NULL);
        vma->vm = vm;
        vma->ops = &vm->vma_ops;
        vma->obj = obj;
@@ -190,33 +168,56 @@ vma_create(struct drm_i915_gem_object *obj,
                                                                i915_gem_object_get_stride(obj));
                GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
 
-               /*
-                * We put the GGTT vma at the start of the vma-list, followed
-                * by the ppGGTT vma. This allows us to break early when
-                * iterating over only the GGTT vma for an object, see
-                * for_each_ggtt_vma()
-                */
                vma->flags |= I915_VMA_GGTT;
-               list_add(&vma->obj_link, &obj->vma_list);
-       } else {
-               list_add_tail(&vma->obj_link, &obj->vma_list);
        }
 
+       spin_lock(&obj->vma.lock);
+
        rb = NULL;
-       p = &obj->vma_tree.rb_node;
+       p = &obj->vma.tree.rb_node;
        while (*p) {
                struct i915_vma *pos;
+               long cmp;
 
                rb = *p;
                pos = rb_entry(rb, struct i915_vma, obj_node);
-               if (i915_vma_compare(pos, vm, view) < 0)
+
+               /*
+                * If the view already exists in the tree, another thread
+                * already created a matching vma, so return the older instance
+                * and dispose of ours.
+                */
+               cmp = i915_vma_compare(pos, vm, view);
+               if (cmp == 0) {
+                       spin_unlock(&obj->vma.lock);
+                       kmem_cache_free(vm->i915->vmas, vma);
+                       return pos;
+               }
+
+               if (cmp < 0)
                        p = &rb->rb_right;
                else
                        p = &rb->rb_left;
        }
        rb_link_node(&vma->obj_node, rb, p);
-       rb_insert_color(&vma->obj_node, &obj->vma_tree);
+       rb_insert_color(&vma->obj_node, &obj->vma.tree);
+
+       if (i915_vma_is_ggtt(vma))
+               /*
+                * We put the GGTT vma at the start of the vma-list, followed
+                * by the ppGGTT vma. This allows us to break early when
+                * iterating over only the GGTT vma for an object, see
+                * for_each_ggtt_vma()
+                */
+               list_add(&vma->obj_link, &obj->vma.list);
+       else
+               list_add_tail(&vma->obj_link, &obj->vma.list);
+
+       spin_unlock(&obj->vma.lock);
+
+       mutex_lock(&vm->mutex);
        list_add(&vma->vm_link, &vm->unbound_list);
+       mutex_unlock(&vm->mutex);
 
        return vma;
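
vma_create() now publishes into obj->vma.tree under obj->vma.lock and, when the locked re-walk finds that another thread already inserted a matching vma, frees its own allocation and returns the established one; the unlocked lookup that races with it appears in i915_vma_instance() below. The same lookup-then-create shape, reduced to a linked list and a pthread mutex:

#include <pthread.h>
#include <stdlib.h>

struct node {
	int key;
	struct node *next;
};

struct cache {
	pthread_mutex_t lock;	/* analogous to obj->vma.lock */
	struct node *head;
};

static struct node *lookup_locked(struct cache *c, int key)
{
	struct node *n;

	for (n = c->head; n; n = n->next)
		if (n->key == key)
			return n;
	return NULL;
}

struct node *get_or_create(struct cache *c, int key)
{
	struct node *n, *pos;

	pthread_mutex_lock(&c->lock);
	n = lookup_locked(c, key);
	pthread_mutex_unlock(&c->lock);
	if (n)
		return n;	/* fast path: already exists */

	n = malloc(sizeof(*n));	/* allocate outside the lock */
	if (!n)
		return NULL;
	n->key = key;

	pthread_mutex_lock(&c->lock);
	pos = lookup_locked(c, key);	/* re-check: did we lose the race? */
	if (pos) {
		pthread_mutex_unlock(&c->lock);
		free(n);		/* dispose of ours, keep the winner */
		return pos;
	}
	n->next = c->head;
	c->head = n;
	pthread_mutex_unlock(&c->lock);
	return n;
}
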
 
@@ -232,7 +233,7 @@ vma_lookup(struct drm_i915_gem_object *obj,
 {
        struct rb_node *rb;
 
-       rb = obj->vma_tree.rb_node;
+       rb = obj->vma.tree.rb_node;
        while (rb) {
                struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
                long cmp;
@@ -272,16 +273,18 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
 {
        struct i915_vma *vma;
 
-       lockdep_assert_held(&obj->base.dev->struct_mutex);
        GEM_BUG_ON(view && !i915_is_ggtt(vm));
        GEM_BUG_ON(vm->closed);
 
+       spin_lock(&obj->vma.lock);
        vma = vma_lookup(obj, vm, view);
-       if (!vma)
+       spin_unlock(&obj->vma.lock);
+
+       /* vma_create() will resolve the race if another creates the vma */
+       if (unlikely(!vma))
                vma = vma_create(obj, vm, view);
 
        GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
-       GEM_BUG_ON(!IS_ERR(vma) && vma_lookup(obj, vm, view) != vma);
        return vma;
 }
 
@@ -659,7 +662,9 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));
 
-       list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+       mutex_lock(&vma->vm->mutex);
+       list_move_tail(&vma->vm_link, &vma->vm->bound_list);
+       mutex_unlock(&vma->vm->mutex);
 
        if (vma->obj) {
                struct drm_i915_gem_object *obj = vma->obj;
@@ -692,8 +697,10 @@ i915_vma_remove(struct i915_vma *vma)
 
        vma->ops->clear_pages(vma);
 
+       mutex_lock(&vma->vm->mutex);
        drm_mm_remove_node(&vma->node);
        list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
+       mutex_unlock(&vma->vm->mutex);
 
        /*
         * Since the unbound list is global, only move to that list if
@@ -797,23 +804,27 @@ void i915_vma_reopen(struct i915_vma *vma)
 static void __i915_vma_destroy(struct i915_vma *vma)
 {
        struct drm_i915_private *i915 = vma->vm->i915;
-       struct i915_vma_active *iter, *n;
 
        GEM_BUG_ON(vma->node.allocated);
        GEM_BUG_ON(vma->fence);
 
-       GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));
+       GEM_BUG_ON(i915_active_request_isset(&vma->last_fence));
 
-       list_del(&vma->obj_link);
+       mutex_lock(&vma->vm->mutex);
        list_del(&vma->vm_link);
-       if (vma->obj)
-               rb_erase(&vma->obj_node, &vma->obj->vma_tree);
+       mutex_unlock(&vma->vm->mutex);
+
+       if (vma->obj) {
+               struct drm_i915_gem_object *obj = vma->obj;
 
-       rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) {
-               GEM_BUG_ON(i915_gem_active_isset(&iter->base));
-               kfree(iter);
+               spin_lock(&obj->vma.lock);
+               list_del(&vma->obj_link);
+               rb_erase(&vma->obj_node, &vma->obj->vma.tree);
+               spin_unlock(&obj->vma.lock);
        }
 
+       i915_active_fini(&vma->active);
+
        kmem_cache_free(i915->vmas, vma);
 }
 
@@ -897,104 +908,15 @@ static void export_fence(struct i915_vma *vma,
        reservation_object_unlock(resv);
 }
 
-static struct i915_gem_active *active_instance(struct i915_vma *vma, u64 idx)
-{
-       struct i915_vma_active *active;
-       struct rb_node **p, *parent;
-       struct i915_request *old;
-
-       /*
-        * We track the most recently used timeline to skip a rbtree search
-        * for the common case, under typical loads we never need the rbtree
-        * at all. We can reuse the last_active slot if it is empty, that is
-        * after the previous activity has been retired, or if the active
-        * matches the current timeline.
-        *
-        * Note that we allow the timeline to be active simultaneously in
-        * the rbtree and the last_active cache. We do this to avoid having
-        * to search and replace the rbtree element for a new timeline, with
-        * the cost being that we must be aware that the vma may be retired
-        * twice for the same timeline (as the older rbtree element will be
-        * retired before the new request added to last_active).
-        */
-       old = i915_gem_active_raw(&vma->last_active,
-                                 &vma->vm->i915->drm.struct_mutex);
-       if (!old || old->fence.context == idx)
-               goto out;
-
-       /* Move the currently active fence into the rbtree */
-       idx = old->fence.context;
-
-       parent = NULL;
-       p = &vma->active.rb_node;
-       while (*p) {
-               parent = *p;
-
-               active = rb_entry(parent, struct i915_vma_active, node);
-               if (active->timeline == idx)
-                       goto replace;
-
-               if (active->timeline < idx)
-                       p = &parent->rb_right;
-               else
-                       p = &parent->rb_left;
-       }
-
-       active = kmalloc(sizeof(*active), GFP_KERNEL);
-
-       /* kmalloc may retire the vma->last_active request (thanks shrinker)! */
-       if (unlikely(!i915_gem_active_raw(&vma->last_active,
-                                         &vma->vm->i915->drm.struct_mutex))) {
-               kfree(active);
-               goto out;
-       }
-
-       if (unlikely(!active))
-               return ERR_PTR(-ENOMEM);
-
-       init_request_active(&active->base, i915_vma_retire);
-       active->vma = vma;
-       active->timeline = idx;
-
-       rb_link_node(&active->node, parent, p);
-       rb_insert_color(&active->node, &vma->active);
-
-replace:
-       /*
-        * Overwrite the previous active slot in the rbtree with last_active,
-        * leaving last_active zeroed. If the previous slot is still active,
-        * we must be careful as we now only expect to receive one retire
-        * callback not two, and so must undo the active counting for the
-        * overwritten slot.
-        */
-       if (i915_gem_active_isset(&active->base)) {
-               /* Retire ourselves from the old rq->active_list */
-               __list_del_entry(&active->base.link);
-               vma->active_count--;
-               GEM_BUG_ON(!vma->active_count);
-       }
-       GEM_BUG_ON(list_empty(&vma->last_active.link));
-       list_replace_init(&vma->last_active.link, &active->base.link);
-       active->base.request = fetch_and_zero(&vma->last_active.request);
-
-out:
-       return &vma->last_active;
-}
-
 int i915_vma_move_to_active(struct i915_vma *vma,
                            struct i915_request *rq,
                            unsigned int flags)
 {
        struct drm_i915_gem_object *obj = vma->obj;
-       struct i915_gem_active *active;
 
        lockdep_assert_held(&rq->i915->drm.struct_mutex);
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 
-       active = active_instance(vma, rq->fence.context);
-       if (IS_ERR(active))
-               return PTR_ERR(active);
-
        /*
         * Add a reference if we're newly entering the active list.
         * The order in which we add operations to the retirement queue is
@@ -1003,11 +925,15 @@ int i915_vma_move_to_active(struct i915_vma *vma,
         * add the active reference first and queue for it to be dropped
         * *last*.
         */
-       if (!i915_gem_active_isset(active) && !vma->active_count++) {
-               list_move_tail(&vma->vm_link, &vma->vm->active_list);
+       if (!vma->active.count)
                obj->active_count++;
+
+       if (unlikely(i915_active_ref(&vma->active, rq->fence.context, rq))) {
+               if (!vma->active.count)
+                       obj->active_count--;
+               return -ENOMEM;
        }
-       i915_gem_active_set(active, rq);
+
        GEM_BUG_ON(!i915_vma_is_active(vma));
        GEM_BUG_ON(!obj->active_count);
 
@@ -1016,14 +942,14 @@ int i915_vma_move_to_active(struct i915_vma *vma,
                obj->write_domain = I915_GEM_DOMAIN_RENDER;
 
                if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
-                       i915_gem_active_set(&obj->frontbuffer_write, rq);
+                       __i915_active_request_set(&obj->frontbuffer_write, rq);
 
                obj->read_domains = 0;
        }
        obj->read_domains |= I915_GEM_GPU_DOMAINS;
 
        if (flags & EXEC_OBJECT_NEEDS_FENCE)
-               i915_gem_active_set(&vma->last_fence, rq);
+               __i915_active_request_set(&vma->last_fence, rq);
 
        export_fence(vma, rq, flags);
        return 0;
@@ -1041,8 +967,6 @@ int i915_vma_unbind(struct i915_vma *vma)
         */
        might_sleep();
        if (i915_vma_is_active(vma)) {
-               struct i915_vma_active *active, *n;
-
                /*
                 * When a closed VMA is retired, it is unbound - eek.
                 * In order to prevent it from being recursively closed,
@@ -1058,21 +982,12 @@ int i915_vma_unbind(struct i915_vma *vma)
                 */
                __i915_vma_pin(vma);
 
-               ret = i915_gem_active_retire(&vma->last_active,
-                                            &vma->vm->i915->drm.struct_mutex);
+               ret = i915_active_wait(&vma->active);
                if (ret)
                        goto unpin;
 
-               rbtree_postorder_for_each_entry_safe(active, n,
-                                                    &vma->active, node) {
-                       ret = i915_gem_active_retire(&active->base,
-                                                    &vma->vm->i915->drm.struct_mutex);
-                       if (ret)
-                               goto unpin;
-               }
-
-               ret = i915_gem_active_retire(&vma->last_fence,
-                                            &vma->vm->i915->drm.struct_mutex);
+               ret = i915_active_request_retire(&vma->last_fence,
+                                             &vma->vm->i915->drm.struct_mutex);
 unpin:
                __i915_vma_unpin(vma);
                if (ret)
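
Where unbind previously retired last_active and then every per-timeline node in the rbtree, it now waits once on the consolidated i915_active tracker. A sketch of what that consolidation reduces to, with illustrative names (one shared count plus a single retirement hook):

/* Sketch: one shared count and one retirement hook replace the per-node set. */
struct active_tracker {
	unsigned int count;			/* requests still in flight */
	void (*retire)(struct active_tracker *);
};

static int active_is_idle(const struct active_tracker *ref)
{
	return ref->count == 0;
}

/* Called as each request completes; retirement runs exactly once, at idle. */
static void active_release(struct active_tracker *ref)
{
	if (--ref->count)
		return;
	ref->retire(ref);
}
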
index 4f7c1c7599f43c3590c99b3b8e122d87c4cc8030..7c742027f8661479d9079c71909bd9875d7ede3c 100644 (file)
@@ -34,6 +34,7 @@
 #include "i915_gem_fence_reg.h"
 #include "i915_gem_object.h"
 
+#include "i915_active.h"
 #include "i915_request.h"
 
 enum i915_cache_level;
@@ -71,34 +72,45 @@ struct i915_vma {
        unsigned int open_count;
        unsigned long flags;
        /**
-        * How many users have pinned this object in GTT space. The following
-        * users can each hold at most one reference: pwrite/pread, execbuffer
-        * (objects are not allowed multiple times for the same batchbuffer),
-        * and the framebuffer code. When switching/pageflipping, the
-        * framebuffer code has at most two buffers pinned per crtc.
+        * How many users have pinned this object in GTT space.
         *
-        * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
-        * bits with absolutely no headroom. So use 4 bits.
+        * This is a tightly bound, fairly small number of users, so we
+        * stuff inside the flags field so that we can both check for overflow
+        * and detect a no-op i915_vma_pin() in a single check, while also
+        * pinning the vma.
+        *
+        * The worst case display setup would have the same vma pinned for
+        * use on each plane on each crtc, while also building the next atomic
+        * state and holding a pin for the length of the cleanup queue. In the
+        * future, the flip queue may be increased from 1.
+        * Estimated worst case: 3 [qlen] * 4 [max crtcs] * 7 [max planes] = 84
+        *
+        * For GEM, the number of concurrent users for pwrite/pread is
+        * unbounded. For execbuffer, it is currently one but will in future
+        * be extended to allow multiple clients to pin vma concurrently.
+        *
+        * We also use suballocated pages, with each suballocation claiming
+        * its own pin on the shared vma. At present, this is limited to
+        * exclusive cachelines of a single page, so a maximum of 64 possible
+        * users.
         */
-#define I915_VMA_PIN_MASK 0xf
-#define I915_VMA_PIN_OVERFLOW  BIT(5)
+#define I915_VMA_PIN_MASK 0xff
+#define I915_VMA_PIN_OVERFLOW  BIT(8)
 
        /** Flags and address space this VMA is bound to */
-#define I915_VMA_GLOBAL_BIND   BIT(6)
-#define I915_VMA_LOCAL_BIND    BIT(7)
+#define I915_VMA_GLOBAL_BIND   BIT(9)
+#define I915_VMA_LOCAL_BIND    BIT(10)
 #define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)
 
-#define I915_VMA_GGTT          BIT(8)
-#define I915_VMA_CAN_FENCE     BIT(9)
-#define I915_VMA_CLOSED                BIT(10)
-#define I915_VMA_USERFAULT_BIT 11
+#define I915_VMA_GGTT          BIT(11)
+#define I915_VMA_CAN_FENCE     BIT(12)
+#define I915_VMA_CLOSED                BIT(13)
+#define I915_VMA_USERFAULT_BIT 14
 #define I915_VMA_USERFAULT     BIT(I915_VMA_USERFAULT_BIT)
-#define I915_VMA_GGTT_WRITE    BIT(12)
+#define I915_VMA_GGTT_WRITE    BIT(15)
 
-       unsigned int active_count;
-       struct rb_root active;
-       struct i915_gem_active last_active;
-       struct i915_gem_active last_fence;
+       struct i915_active active;
+       struct i915_active_request last_fence;
 
        /**
         * Support different GGTT views into the same object.
@@ -141,9 +153,9 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
 void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags);
 #define I915_VMA_RELEASE_MAP BIT(0)
 
-static inline bool i915_vma_is_active(struct i915_vma *vma)
+static inline bool i915_vma_is_active(const struct i915_vma *vma)
 {
-       return vma->active_count;
+       return !i915_active_is_idle(&vma->active);
 }
 
 int __must_check i915_vma_move_to_active(struct i915_vma *vma,
@@ -425,7 +437,7 @@ void i915_vma_parked(struct drm_i915_private *i915);
  * or the list is empty ofc.
  */
 #define for_each_ggtt_vma(V, OBJ) \
-       list_for_each_entry(V, &(OBJ)->vma_list, obj_link)              \
+       list_for_each_entry(V, &(OBJ)->vma.list, obj_link)              \
                for_each_until(!i915_vma_is_ggtt(V))
 
 #endif
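
The widened pin field keeps the overflow trick intact: the pin count occupies the low byte of vma->flags, the bit immediately above it is reserved, and a plain increment that carries out of the mask sets I915_VMA_PIN_OVERFLOW, testable in the same word. A self-contained check of that arithmetic:

#include <assert.h>

#define PIN_MASK	0xffu		/* pin count in the low byte */
#define PIN_OVERFLOW	(1u << 8)	/* carries out of the mask land here */

/* Mirrors the increment-then-test shape of the pin fast path. */
static unsigned int pin_overflows(unsigned int *flags)
{
	*flags += 1;			/* pinning increments the flags word */
	return *flags & PIN_OVERFLOW;	/* one test catches saturation */
}

int main(void)
{
	unsigned int flags = 0;

	for (int i = 0; i < 255; i++)
		assert(!pin_overflows(&flags));	/* up to 255 users fit */
	assert(pin_overflows(&flags));		/* the 256th trips overflow */
	return 0;
}
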
index 4dd793b789962873a3523222336ba7a5d99a0677..73a7bee24a663faa672ade21cfe7ea7cb1bc4b46 100644 (file)
@@ -337,9 +337,11 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
        }
 
        for_each_dsi_port(port, intel_dsi->ports) {
-               intel_display_power_get(dev_priv, port == PORT_A ?
-                                       POWER_DOMAIN_PORT_DDI_A_IO :
-                                       POWER_DOMAIN_PORT_DDI_B_IO);
+               intel_dsi->io_wakeref[port] =
+                       intel_display_power_get(dev_priv,
+                                               port == PORT_A ?
+                                               POWER_DOMAIN_PORT_DDI_A_IO :
+                                               POWER_DOMAIN_PORT_DDI_B_IO);
        }
 }
 
@@ -1125,10 +1127,18 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
        enum port port;
        u32 tmp;
 
-       intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_A_IO);
-
-       if (intel_dsi->dual_link)
-               intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_B_IO);
+       for_each_dsi_port(port, intel_dsi->ports) {
+               intel_wakeref_t wakeref;
+
+               wakeref = fetch_and_zero(&intel_dsi->io_wakeref[port]);
+               if (wakeref) {
+                       intel_display_power_put(dev_priv,
+                                               port == PORT_A ?
+                                               POWER_DOMAIN_PORT_DDI_A_IO :
+                                               POWER_DOMAIN_PORT_DDI_B_IO,
+                                               wakeref);
+               }
+       }
 
        /* set mode to DDI */
        for_each_dsi_port(port, intel_dsi->ports) {
@@ -1178,9 +1188,9 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder,
        pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
 }
 
-static bool gen11_dsi_compute_config(struct intel_encoder *encoder,
-                                    struct intel_crtc_state *pipe_config,
-                                    struct drm_connector_state *conn_state)
+static int gen11_dsi_compute_config(struct intel_encoder *encoder,
+                                   struct intel_crtc_state *pipe_config,
+                                   struct drm_connector_state *conn_state)
 {
        struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
                                                   base);
@@ -1205,7 +1215,7 @@ static bool gen11_dsi_compute_config(struct intel_encoder *encoder,
        pipe_config->clock_set = true;
        pipe_config->port_clock = intel_dsi_bitrate(intel_dsi) / 5;
 
-       return true;
+       return 0;
 }
 
 static u64 gen11_dsi_get_power_domains(struct intel_encoder *encoder,
@@ -1229,13 +1239,15 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
-       u32 tmp;
-       enum port port;
        enum transcoder dsi_trans;
+       intel_wakeref_t wakeref;
+       enum port port;
        bool ret = false;
+       u32 tmp;
 
-       if (!intel_display_power_get_if_enabled(dev_priv,
-                                               encoder->power_domain))
+       wakeref = intel_display_power_get_if_enabled(dev_priv,
+                                                    encoder->power_domain);
+       if (!wakeref)
                return false;
 
        for_each_dsi_port(port, intel_dsi->ports) {
@@ -1260,7 +1272,7 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
                ret = tmp & PIPECONF_ENABLE;
        }
 out:
-       intel_display_power_put(dev_priv, encoder->power_domain);
+       intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
        return ret;
 }
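
Storing one wakeref per DSI port and draining it with fetch_and_zero() makes the disable path safe to run against a port that was never powered: a slot holding zero simply skips the put. A sketch of the fetch-and-zero helper, which in the driver is a macro of essentially this shape:

/* Return the old value and leave zero behind (GNU C statement expression). */
#define fetch_and_zero(ptr) ({			\
	__typeof__(*(ptr)) __old = *(ptr);	\
	*(ptr) = (__typeof__(*(ptr)))0;		\
	__old;					\
})

typedef unsigned long wakeref_t;

/* Only a slot holding a live cookie performs the put; empty slots are no-ops. */
static void put_once(wakeref_t *slot, void (*put)(wakeref_t))
{
	wakeref_t wf = fetch_and_zero(slot);

	if (wf)
		put(wf);
}
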
 
@@ -1378,6 +1390,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
        encoder->disable = gen11_dsi_disable;
        encoder->port = port;
        encoder->get_config = gen11_dsi_get_config;
+       encoder->update_pipe = intel_panel_update_backlight;
        encoder->compute_config = gen11_dsi_compute_config;
        encoder->get_hw_state = gen11_dsi_get_hw_state;
        encoder->type = INTEL_OUTPUT_DSI;
index 6ba478e57b9bc51f9cebab5f6e00ad442ea8c13a..9d142d038a7d3631ac927280e549528e86756db5 100644 (file)
@@ -6,7 +6,6 @@
  */
 #include <linux/pci.h>
 #include <linux/acpi.h>
-#include <drm/drmP.h>
 #include "i915_drv.h"
 
 #define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
index 8cb02f28d30cf6b12fe002c13c8bd9d17f9fa6f2..16263add3cdda4bf1ef90a1ed4f7d5adcfcacdcc 100644 (file)
@@ -29,7 +29,6 @@
  * See intel_atomic_plane.c for the plane-specific atomic functionality.
  */
 
-#include <drm/drmP.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_plane_helper.h>
@@ -47,7 +46,7 @@
 int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
                                                const struct drm_connector_state *state,
                                                struct drm_property *property,
-                                               uint64_t *val)
+                                               u64 *val)
 {
        struct drm_device *dev = connector->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -79,7 +78,7 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
 int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
                                                struct drm_connector_state *state,
                                                struct drm_property *property,
-                                               uint64_t val)
+                                               u64 val)
 {
        struct drm_device *dev = connector->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -233,7 +232,7 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
        if (plane_state && plane_state->base.fb &&
            plane_state->base.fb->format->is_yuv &&
            plane_state->base.fb->format->num_planes > 1) {
-               if (IS_GEN9(dev_priv) &&
+               if (IS_GEN(dev_priv, 9) &&
                    !IS_GEMINILAKE(dev_priv)) {
                        mode = SKL_PS_SCALER_MODE_NV12;
                } else if (icl_is_hdr_plane(to_intel_plane(plane_state->base.plane))) {
index 0a73e6e65c2030b2f094c5a89f8248cb807a8020..a1a263026574288dcdb4f8b2c1671b2388d15256 100644 (file)
@@ -31,7 +31,6 @@
  * prepare/check/commit/cleanup steps.
  */
 
-#include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_plane_helper.h>
 #include "intel_drv.h"
@@ -111,41 +110,39 @@ intel_plane_destroy_state(struct drm_plane *plane,
 }
 
 int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
-                                       struct intel_crtc_state *crtc_state,
+                                       struct intel_crtc_state *new_crtc_state,
                                        const struct intel_plane_state *old_plane_state,
-                                       struct intel_plane_state *intel_state)
+                                       struct intel_plane_state *new_plane_state)
 {
-       struct drm_plane *plane = intel_state->base.plane;
-       struct drm_plane_state *state = &intel_state->base;
-       struct intel_plane *intel_plane = to_intel_plane(plane);
+       struct intel_plane *plane = to_intel_plane(new_plane_state->base.plane);
        int ret;
 
-       crtc_state->active_planes &= ~BIT(intel_plane->id);
-       crtc_state->nv12_planes &= ~BIT(intel_plane->id);
-       intel_state->base.visible = false;
+       new_crtc_state->active_planes &= ~BIT(plane->id);
+       new_crtc_state->nv12_planes &= ~BIT(plane->id);
+       new_plane_state->base.visible = false;
 
-       /* If this is a cursor plane, no further checks are needed. */
-       if (!intel_state->base.crtc && !old_plane_state->base.crtc)
+       if (!new_plane_state->base.crtc && !old_plane_state->base.crtc)
                return 0;
 
-       ret = intel_plane->check_plane(crtc_state, intel_state);
+       ret = plane->check_plane(new_crtc_state, new_plane_state);
        if (ret)
                return ret;
 
        /* FIXME pre-g4x don't work like this */
-       if (state->visible)
-               crtc_state->active_planes |= BIT(intel_plane->id);
+       if (new_plane_state->base.visible)
+               new_crtc_state->active_planes |= BIT(plane->id);
 
-       if (state->visible && state->fb->format->format == DRM_FORMAT_NV12)
-               crtc_state->nv12_planes |= BIT(intel_plane->id);
+       if (new_plane_state->base.visible &&
+           new_plane_state->base.fb->format->format == DRM_FORMAT_NV12)
+               new_crtc_state->nv12_planes |= BIT(plane->id);
 
-       if (state->visible || old_plane_state->base.visible)
-               crtc_state->update_planes |= BIT(intel_plane->id);
+       if (new_plane_state->base.visible || old_plane_state->base.visible)
+               new_crtc_state->update_planes |= BIT(plane->id);
 
        return intel_plane_atomic_calc_changes(old_crtc_state,
-                                              &crtc_state->base,
+                                              &new_crtc_state->base,
                                               old_plane_state,
-                                              state);
+                                              &new_plane_state->base);
 }
 
 static int intel_plane_atomic_check(struct drm_plane *plane,
@@ -312,7 +309,7 @@ int
 intel_plane_atomic_get_property(struct drm_plane *plane,
                                const struct drm_plane_state *state,
                                struct drm_property *property,
-                               uint64_t *val)
+                               u64 *val)
 {
        DRM_DEBUG_KMS("Unknown property [PROP:%d:%s]\n",
                      property->base.id, property->name);
@@ -335,7 +332,7 @@ int
 intel_plane_atomic_set_property(struct drm_plane *plane,
                                struct drm_plane_state *state,
                                struct drm_property *property,
-                               uint64_t val)
+                               u64 val)
 {
        DRM_DEBUG_KMS("Unknown property [PROP:%d:%s]\n",
                      property->base.id, property->name);
index ae55a6865d5cca98f8738cbf8db4cffe5039bfa2..de26cd0a54979672aadec40c1738e207da5a6afd 100644 (file)
@@ -27,7 +27,6 @@
 #include <drm/intel_lpe_audio.h>
 #include "intel_drv.h"
 
-#include <drm/drmP.h>
 #include <drm/drm_edid.h>
 #include "i915_drv.h"
 
@@ -749,7 +748,8 @@ static void i915_audio_component_get_power(struct device *kdev)
 
 static void i915_audio_component_put_power(struct device *kdev)
 {
-       intel_display_power_put(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO);
+       intel_display_power_put_unchecked(kdev_to_i915(kdev),
+                                         POWER_DOMAIN_AUDIO);
 }
 
 static void i915_audio_component_codec_wake_override(struct device *kdev,
@@ -758,7 +758,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
        struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
        u32 tmp;
 
-       if (!IS_GEN9(dev_priv))
+       if (!IS_GEN(dev_priv, 9))
                return;
 
        i915_audio_component_get_power(kdev);
index 6d3e0260d49cda5b2d5ffa36152ef6eb544cc14e..b508d8a735e0347637274aebb2a5eaed29dda2fd 100644 (file)
@@ -26,7 +26,6 @@
  */
 
 #include <drm/drm_dp_helper.h>
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
@@ -453,7 +452,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
         * Only parse SDVO mappings on gens that could have SDVO. This isn't
         * accurate and doesn't have to be, as long as it's not too strict.
         */
-       if (!IS_GEN(dev_priv, 3, 7)) {
+       if (!IS_GEN_RANGE(dev_priv, 3, 7)) {
                DRM_DEBUG_KMS("Skipping SDVO device mapping\n");
                return;
        }
@@ -1386,8 +1385,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
        info->supports_dp = is_dp;
        info->supports_edp = is_edp;
 
-       DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d\n",
-                     port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt);
+       if (bdb_version >= 195)
+               info->supports_typec_usb = child->dp_usb_type_c;
+
+       if (bdb_version >= 209)
+               info->supports_tbt = child->tbt;
+
+       DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d TCUSB:%d TBT:%d\n",
+                     port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt,
+                     info->supports_typec_usb, info->supports_tbt);
 
        if (is_edp && is_dvi)
                DRM_DEBUG_KMS("Internal DP port %c is TMDS compatible\n",
@@ -1657,6 +1663,13 @@ init_vbt_missing_defaults(struct drm_i915_private *dev_priv)
                struct ddi_vbt_port_info *info =
                        &dev_priv->vbt.ddi_port_info[port];
 
+               /*
+                * VBT has the TypeC mode (native,TBT/USB) and we don't want
+                * to detect it.
+                */
+               if (intel_port_is_tc(dev_priv, port))
+                       continue;
+
                info->supports_dvi = (port != PORT_A && port != PORT_E);
                info->supports_hdmi = info->supports_dvi;
                info->supports_dp = (port != PORT_E);
@@ -1940,6 +1953,15 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
        };
        int i;
 
+       if (HAS_DDI(dev_priv)) {
+               const struct ddi_vbt_port_info *port_info =
+                       &dev_priv->vbt.ddi_port_info[port];
+
+               return port_info->supports_dp ||
+                      port_info->supports_dvi ||
+                      port_info->supports_hdmi;
+       }
+
        /* FIXME maybe deal with port A as well? */
        if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
                return false;
index 447c5256f63a9399f39f2439a558e3b88e4205ae..cacaa1d04d174cab452231ae622c16b0cdbb98b1 100644 (file)
 
 #define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_rq)
 
-static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
+static void irq_enable(struct intel_engine_cs *engine)
 {
-       struct intel_wait *wait;
-       unsigned int result = 0;
-
-       lockdep_assert_held(&b->irq_lock);
-
-       wait = b->irq_wait;
-       if (wait) {
-               /*
-                * N.B. Since task_asleep() and ttwu are not atomic, the
-                * waiter may actually go to sleep after the check, causing
-                * us to suppress a valid wakeup. We prefer to reduce the
-                * number of false positive missed_breadcrumb() warnings
-                * at the expense of a few false negatives, as it is easy
-                * to trigger a false positive under heavy load. Enough
-                * signal should remain from genuine missed_breadcrumb()
-                * for us to detect in CI.
-                */
-               bool was_asleep = task_asleep(wait->tsk);
-
-               result = ENGINE_WAKEUP_WAITER;
-               if (wake_up_process(wait->tsk) && was_asleep)
-                       result |= ENGINE_WAKEUP_ASLEEP;
-       }
+       if (!engine->irq_enable)
+               return;
 
-       return result;
+       /* Caller disables interrupts */
+       spin_lock(&engine->i915->irq_lock);
+       engine->irq_enable(engine);
+       spin_unlock(&engine->i915->irq_lock);
 }
 
-unsigned int intel_engine_wakeup(struct intel_engine_cs *engine)
+static void irq_disable(struct intel_engine_cs *engine)
 {
-       struct intel_breadcrumbs *b = &engine->breadcrumbs;
-       unsigned long flags;
-       unsigned int result;
-
-       spin_lock_irqsave(&b->irq_lock, flags);
-       result = __intel_breadcrumbs_wakeup(b);
-       spin_unlock_irqrestore(&b->irq_lock, flags);
-
-       return result;
-}
+       if (!engine->irq_disable)
+               return;
 
-static unsigned long wait_timeout(void)
-{
-       return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
+       /* Caller disables interrupts */
+       spin_lock(&engine->i915->irq_lock);
+       engine->irq_disable(engine);
+       spin_unlock(&engine->i915->irq_lock);
 }
 
-static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
+static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
 {
-       if (GEM_SHOW_DEBUG()) {
-               struct drm_printer p = drm_debug_printer(__func__);
+       lockdep_assert_held(&b->irq_lock);
 
-               intel_engine_dump(engine, &p,
-                                 "%s missed breadcrumb at %pS\n",
-                                 engine->name, __builtin_return_address(0));
-       }
+       GEM_BUG_ON(!b->irq_enabled);
+       if (!--b->irq_enabled)
+               irq_disable(container_of(b,
+                                        struct intel_engine_cs,
+                                        breadcrumbs));
 
-       set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
+       b->irq_armed = false;
 }
 
-static void intel_breadcrumbs_hangcheck(struct timer_list *t)
+void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
 {
-       struct intel_engine_cs *engine =
-               from_timer(engine, t, breadcrumbs.hangcheck);
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
-       unsigned int irq_count;
 
        if (!b->irq_armed)
                return;
 
-       irq_count = READ_ONCE(b->irq_count);
-       if (b->hangcheck_interrupts != irq_count) {
-               b->hangcheck_interrupts = irq_count;
-               mod_timer(&b->hangcheck, wait_timeout());
-               return;
-       }
+       spin_lock_irq(&b->irq_lock);
+       if (b->irq_armed)
+               __intel_breadcrumbs_disarm_irq(b);
+       spin_unlock_irq(&b->irq_lock);
+}
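
The disarm path above tests b->irq_armed once without the lock and again under it: the unlocked read lets the common already-disarmed case skip the spinlock entirely, while the locked re-test guards against a concurrent disarm. A generic sketch of the idiom, with hypothetical names:

	if (!READ_ONCE(obj->armed))	/* cheap fast path, no lock taken */
		return;

	spin_lock_irq(&obj->lock);
	if (obj->armed)			/* re-check: we may have raced */
		__disarm(obj);
	spin_unlock_irq(&obj->lock);
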
 
-       /* We keep the hangcheck timer alive until we disarm the irq, even
-        * if there are no waiters at present.
-        *
-        * If the waiter was currently running, assume it hasn't had a chance
-        * to process the pending interrupt (e.g, low priority task on a loaded
-        * system) and wait until it sleeps before declaring a missed interrupt.
-        *
-        * If the waiter was asleep (and not even pending a wakeup), then we
-        * must have missed an interrupt as the GPU has stopped advancing
-        * but we still have a waiter. Assuming all batches complete within
-        * DRM_I915_HANGCHECK_JIFFIES [1.5s]!
-        */
-       if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP) {
-               missed_breadcrumb(engine);
-               mod_timer(&b->fake_irq, jiffies + 1);
-       } else {
-               mod_timer(&b->hangcheck, wait_timeout());
-       }
+static inline bool __request_completed(const struct i915_request *rq)
+{
+       return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
 }
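
__request_completed() defers to i915_seqno_passed() for a wraparound-safe comparison of 32-bit sequence numbers. Its conventional definition (an assumption here; the diff does not show it) relies on signed subtraction:

static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	/*
	 * The signed difference stays non-negative for up to 2^31 steps
	 * of forward progress, so the test survives u32 wraparound.
	 */
	return (s32)(seq1 - seq2) >= 0;
}
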
 
-static void intel_breadcrumbs_fake_irq(struct timer_list *t)
+bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
 {
-       struct intel_engine_cs *engine =
-               from_timer(engine, t, breadcrumbs.fake_irq);
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
+       struct intel_context *ce, *cn;
+       struct list_head *pos, *next;
+       LIST_HEAD(signal);
 
-       /*
-        * The timer persists in case we cannot enable interrupts,
-        * or if we have previously seen seqno/interrupt incoherency
-        * ("missed interrupt" syndrome, better known as a "missed breadcrumb").
-        * Here the worker will wake up every jiffie in order to kick the
-        * oldest waiter to do the coherent seqno check.
-        */
+       spin_lock(&b->irq_lock);
 
-       spin_lock_irq(&b->irq_lock);
-       if (b->irq_armed && !__intel_breadcrumbs_wakeup(b))
-               __intel_engine_disarm_breadcrumbs(engine);
-       spin_unlock_irq(&b->irq_lock);
-       if (!b->irq_armed)
-               return;
+       if (b->irq_armed && list_empty(&b->signalers))
+               __intel_breadcrumbs_disarm_irq(b);
 
-       /* If the user has disabled the fake-irq, restore the hangchecking */
-       if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings)) {
-               mod_timer(&b->hangcheck, wait_timeout());
-               return;
-       }
+       list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) {
+               GEM_BUG_ON(list_empty(&ce->signals));
 
-       mod_timer(&b->fake_irq, jiffies + 1);
-}
+               list_for_each_safe(pos, next, &ce->signals) {
+                       struct i915_request *rq =
+                               list_entry(pos, typeof(*rq), signal_link);
 
-static void irq_enable(struct intel_engine_cs *engine)
-{
-       /*
-        * FIXME: Ideally we want this on the API boundary, but for the
-        * sake of testing with mock breadcrumbs (no HW so unable to
-        * enable irqs) we place it deep within the bowels, at the point
-        * of no return.
-        */
-       GEM_BUG_ON(!intel_irqs_enabled(engine->i915));
+                       if (!__request_completed(rq))
+                               break;
 
-       /* Enabling the IRQ may miss the generation of the interrupt, but
-        * we still need to force the barrier before reading the seqno,
-        * just in case.
-        */
-       set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
+                       GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL,
+                                            &rq->fence.flags));
+                       clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
 
-       /* Caller disables interrupts */
-       if (engine->irq_enable) {
-               spin_lock(&engine->i915->irq_lock);
-               engine->irq_enable(engine);
-               spin_unlock(&engine->i915->irq_lock);
+                       /*
+                        * We may race with direct invocation of
+                        * dma_fence_signal(), e.g. i915_request_retire(),
+                        * in which case we can skip processing it ourselves.
+                        */
+                       if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+                                    &rq->fence.flags))
+                               continue;
+
+                       /*
+                        * Queue for execution after dropping the signaling
+                        * spinlock as the callback chain may end up adding
+                        * more signalers to the same context or engine.
+                        */
+                       i915_request_get(rq);
+                       list_add_tail(&rq->signal_link, &signal);
+               }
+
+               /*
+                * We process the list deletion in bulk, only using a list_add
+                * (not list_move) above but keeping the status of
+                * rq->signal_link known with the I915_FENCE_FLAG_SIGNAL bit.
+                */
+               if (!list_is_first(pos, &ce->signals)) {
+                       /* Advance the list to the first incomplete request */
+                       __list_del_many(&ce->signals, pos);
+                       if (&ce->signals == pos) /* now empty */
+                               list_del_init(&ce->signal_link);
+               }
        }
-}
 
-static void irq_disable(struct intel_engine_cs *engine)
-{
-       /* Caller disables interrupts */
-       if (engine->irq_disable) {
-               spin_lock(&engine->i915->irq_lock);
-               engine->irq_disable(engine);
-               spin_unlock(&engine->i915->irq_lock);
+       spin_unlock(&b->irq_lock);
+
+       list_for_each_safe(pos, next, &signal) {
+               struct i915_request *rq =
+                       list_entry(pos, typeof(*rq), signal_link);
+
+               dma_fence_signal(&rq->fence);
+               i915_request_put(rq);
        }
+
+       return !list_empty(&signal);
 }
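
intel_engine_breadcrumbs_irq() above is shaped as a two-phase walk: completed requests are collected onto a local list while b->irq_lock is held, and dma_fence_signal() runs only after the lock drops, because fence callbacks may re-enter and add new signalers to the same context or engine. Stripped to its skeleton (helper and field names hypothetical):

	LIST_HEAD(batch);
	struct list_head *pos, *next;

	spin_lock(&lock);
	list_for_each_safe(pos, next, &pending) {
		struct item *it = list_entry(pos, typeof(*it), link);

		if (!item_is_ready(it))
			break;		/* list is kept in completion order */
		item_get(it);		/* keep it alive past the unlock */
		list_add_tail(&it->batch_link, &batch);
	}
	spin_unlock(&lock);

	list_for_each_safe(pos, next, &batch) {
		struct item *it = list_entry(pos, typeof(*it), batch_link);

		item_complete(it);	/* may take other locks safely */
		item_put(it);
	}
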
 
-void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
+bool intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
 {
-       struct intel_breadcrumbs *b = &engine->breadcrumbs;
+       bool result;
 
-       lockdep_assert_held(&b->irq_lock);
-       GEM_BUG_ON(b->irq_wait);
-       GEM_BUG_ON(!b->irq_armed);
+       local_irq_disable();
+       result = intel_engine_breadcrumbs_irq(engine);
+       local_irq_enable();
 
-       GEM_BUG_ON(!b->irq_enabled);
-       if (!--b->irq_enabled)
-               irq_disable(engine);
+       return result;
+}
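
Note why the plain spin_lock(&b->irq_lock) inside intel_engine_breadcrumbs_irq() is sufficient: every caller already runs with interrupts disabled, the hard-irq path by definition, and this process-context wrapper by bracketing the call with local_irq_disable()/local_irq_enable().
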
 
-       b->irq_armed = false;
+static void signal_irq_work(struct irq_work *work)
+{
+       struct intel_engine_cs *engine =
+               container_of(work, typeof(*engine), breadcrumbs.irq_work);
+
+       intel_engine_breadcrumbs_irq(engine);
 }
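
signal_irq_work() is the callback wired up via init_irq_work() in intel_engine_init_breadcrumbs() below; queueing an irq_work from the interrupt handler defers the signaling walk out of the hard-irq hot path while keeping it in a non-sleeping context. A minimal, self-contained sketch of the kernel API (the embedding struct and function names are hypothetical):

#include <linux/irq_work.h>

struct my_engine {
	struct irq_work irq_work;
};

static void my_irq_work_fn(struct irq_work *work)
{
	struct my_engine *e = container_of(work, struct my_engine, irq_work);

	/* Runs shortly after being queued; still may not sleep. */
	(void)e;
}

static void my_engine_init(struct my_engine *e)
{
	init_irq_work(&e->irq_work, my_irq_work_fn);
}

static void my_engine_irq(struct my_engine *e)
{
	irq_work_queue(&e->irq_work);	/* defer the heavy walk */
}
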
 
 void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine)
@@ -227,666 +193,155 @@ void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine)
        spin_unlock_irq(&b->irq_lock);
 }
 
-void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
-{
-       struct intel_breadcrumbs *b = &engine->breadcrumbs;
-       struct intel_wait *wait, *n;
-
-       if (!b->irq_armed)
-               return;
-
-       /*
-        * We only disarm the irq when we are idle (all requests completed),
-        * so if the bottom-half remains asleep, it missed the request
-        * completion.
-        */
-       if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP)
-               missed_breadcrumb(engine);
-
-       spin_lock_irq(&b->rb_lock);
-
-       spin_lock(&b->irq_lock);
-       b->irq_wait = NULL;
-       if (b->irq_armed)
-               __intel_engine_disarm_breadcrumbs(engine);
-       spin_unlock(&b->irq_lock);
-
-       rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) {
-               GEM_BUG_ON(!intel_engine_signaled(engine, wait->seqno));
-               RB_CLEAR_NODE(&wait->node);
-               wake_up_process(wait->tsk);
-       }
-       b->waiters = RB_ROOT;
-
-       spin_unlock_irq(&b->rb_lock);
-}
-
-static bool use_fake_irq(const struct intel_breadcrumbs *b)
-{
-       const struct intel_engine_cs *engine =
-               container_of(b, struct intel_engine_cs, breadcrumbs);
-
-       if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings))
-               return false;
-
-       /*
-        * Only start with the heavy weight fake irq timer if we have not
-        * seen any interrupts since enabling it the first time. If the
-        * interrupts are still arriving, it means we made a mistake in our
-        * engine->seqno_barrier(), a timing error that should be transient
-        * and unlikely to reoccur.
-        */
-       return READ_ONCE(b->irq_count) == b->hangcheck_interrupts;
-}
-
-static void enable_fake_irq(struct intel_breadcrumbs *b)
-{
-       /* Ensure we never sleep indefinitely */
-       if (!b->irq_enabled || use_fake_irq(b))
-               mod_timer(&b->fake_irq, jiffies + 1);
-       else
-               mod_timer(&b->hangcheck, wait_timeout());
-}
-
-static bool __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
+static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
 {
        struct intel_engine_cs *engine =
                container_of(b, struct intel_engine_cs, breadcrumbs);
-       struct drm_i915_private *i915 = engine->i915;
-       bool enabled;
 
        lockdep_assert_held(&b->irq_lock);
        if (b->irq_armed)
-               return false;
+               return;
 
-       /* The breadcrumb irq will be disarmed on the interrupt after the
+       /*
+        * The breadcrumb irq will be disarmed on the interrupt after the
         * waiters are signaled. This gives us a single interrupt window in
         * which we can add a new waiter and avoid the cost of re-enabling
         * the irq.
         */
        b->irq_armed = true;
 
-       if (I915_SELFTEST_ONLY(b->mock)) {
-               /* For our mock objects we want to avoid interaction
-                * with the real hardware (which is not set up). So
-                * we simply pretend we have enabled the powerwell
-                * and the irq, and leave it up to the mock
-                * implementation to call intel_engine_wakeup()
-                * itself when it wants to simulate a user interrupt,
-                */
-               return true;
-       }
-
-       /* Since we are waiting on a request, the GPU should be busy
+       /*
+        * Since we are waiting on a request, the GPU should be busy
         * and should have its own rpm reference. This is tracked
         * by i915->gt.awake, we can forgo holding our own wakref
         * for the interrupt as before i915->gt.awake is released (when
         * the driver is idle) we disarm the breadcrumbs.
         */
 
-       /* No interrupts? Kick the waiter every jiffie! */
-       enabled = false;
-       if (!b->irq_enabled++ &&
-           !test_bit(engine->id, &i915->gpu_error.test_irq_rings)) {
+       if (!b->irq_enabled++)
                irq_enable(engine);
-               enabled = true;
-       }
-
-       enable_fake_irq(b);
-       return enabled;
-}
-
-static inline struct intel_wait *to_wait(struct rb_node *node)
-{
-       return rb_entry(node, struct intel_wait, node);
-}
-
-static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
-                                             struct intel_wait *wait)
-{
-       lockdep_assert_held(&b->rb_lock);
-       GEM_BUG_ON(b->irq_wait == wait);
-
-       /*
-        * This request is completed, so remove it from the tree, mark it as
-        * complete, and *then* wake up the associated task. N.B. when the
-        * task wakes up, it will find the empty rb_node, discern that it
-        * has already been removed from the tree and skip the serialisation
-        * of the b->rb_lock and b->irq_lock. This means that the destruction
-        * of the intel_wait is not serialised with the interrupt handler
-        * by the waiter - it must instead be serialised by the caller.
-        */
-       rb_erase(&wait->node, &b->waiters);
-       RB_CLEAR_NODE(&wait->node);
-
-       if (wait->tsk->state != TASK_RUNNING)
-               wake_up_process(wait->tsk); /* implicit smp_wmb() */
-}
-
-static inline void __intel_breadcrumbs_next(struct intel_engine_cs *engine,
-                                           struct rb_node *next)
-{
-       struct intel_breadcrumbs *b = &engine->breadcrumbs;
-
-       spin_lock(&b->irq_lock);
-       GEM_BUG_ON(!b->irq_armed);
-       GEM_BUG_ON(!b->irq_wait);
-       b->irq_wait = to_wait(next);
-       spin_unlock(&b->irq_lock);
-
-       /* We always wake up the next waiter that takes over as the bottom-half
-        * as we may delegate not only the irq-seqno barrier to the next waiter
-        * but also the task of waking up concurrent waiters.
-        */
-       if (next)
-               wake_up_process(to_wait(next)->tsk);
 }
 
-static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
-                                   struct intel_wait *wait)
+void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
 {
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
-       struct rb_node **p, *parent, *completed;
-       bool first, armed;
-       u32 seqno;
-
-       GEM_BUG_ON(!wait->seqno);
-
-       /* Insert the request into the retirement ordered list
-        * of waiters by walking the rbtree. If we are the oldest
-        * seqno in the tree (the first to be retired), then
-        * set ourselves as the bottom-half.
-        *
-        * As we descend the tree, prune completed branches since we hold the
-        * spinlock we know that the first_waiter must be delayed and can
-        * reduce some of the sequential wake up latency if we take action
-        * ourselves and wake up the completed tasks in parallel. Also, by
-        * removing stale elements in the tree, we may be able to reduce the
-        * ping-pong between the old bottom-half and ourselves as first-waiter.
-        */
-       armed = false;
-       first = true;
-       parent = NULL;
-       completed = NULL;
-       seqno = intel_engine_get_seqno(engine);
-
-        /* If the request completed before we managed to grab the spinlock,
-         * return now before adding ourselves to the rbtree. We let the
-         * current bottom-half handle any pending wakeups and instead
-         * try and get out of the way quickly.
-         */
-       if (i915_seqno_passed(seqno, wait->seqno)) {
-               RB_CLEAR_NODE(&wait->node);
-               return first;
-       }
-
-       p = &b->waiters.rb_node;
-       while (*p) {
-               parent = *p;
-               if (wait->seqno == to_wait(parent)->seqno) {
-                       /* We have multiple waiters on the same seqno, select
-                        * the highest priority task (that with the smallest
-                        * task->prio) to serve as the bottom-half for this
-                        * group.
-                        */
-                       if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
-                               p = &parent->rb_right;
-                               first = false;
-                       } else {
-                               p = &parent->rb_left;
-                       }
-               } else if (i915_seqno_passed(wait->seqno,
-                                            to_wait(parent)->seqno)) {
-                       p = &parent->rb_right;
-                       if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
-                               completed = parent;
-                       else
-                               first = false;
-               } else {
-                       p = &parent->rb_left;
-               }
-       }
-       rb_link_node(&wait->node, parent, p);
-       rb_insert_color(&wait->node, &b->waiters);
-
-       if (first) {
-               spin_lock(&b->irq_lock);
-               b->irq_wait = wait;
-               /* After assigning ourselves as the new bottom-half, we must
-                * perform a cursory check to prevent a missed interrupt.
-                * Either we miss the interrupt whilst programming the hardware,
-                * or if there was a previous waiter (for a later seqno) they
-                * may be woken instead of us (due to the inherent race
-                * in the unlocked read of b->irq_seqno_bh in the irq handler)
-                * and so we miss the wake up.
-                */
-               armed = __intel_breadcrumbs_enable_irq(b);
-               spin_unlock(&b->irq_lock);
-       }
 
-       if (completed) {
-               /* Advance the bottom-half (b->irq_wait) before we wake up
-                * the waiters who may scribble over their intel_wait
-                * just as the interrupt handler is dereferencing it via
-                * b->irq_wait.
-                */
-               if (!first) {
-                       struct rb_node *next = rb_next(completed);
-                       GEM_BUG_ON(next == &wait->node);
-                       __intel_breadcrumbs_next(engine, next);
-               }
-
-               do {
-                       struct intel_wait *crumb = to_wait(completed);
-                       completed = rb_prev(completed);
-                       __intel_breadcrumbs_finish(b, crumb);
-               } while (completed);
-       }
-
-       GEM_BUG_ON(!b->irq_wait);
-       GEM_BUG_ON(!b->irq_armed);
-       GEM_BUG_ON(rb_first(&b->waiters) != &b->irq_wait->node);
+       spin_lock_init(&b->irq_lock);
+       INIT_LIST_HEAD(&b->signalers);
 
-       return armed;
+       init_irq_work(&b->irq_work, signal_irq_work);
 }
 
-bool intel_engine_add_wait(struct intel_engine_cs *engine,
-                          struct intel_wait *wait)
+void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
 {
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
-       bool armed;
-
-       spin_lock_irq(&b->rb_lock);
-       armed = __intel_engine_add_wait(engine, wait);
-       spin_unlock_irq(&b->rb_lock);
-       if (armed)
-               return armed;
-
-       /* Make the caller recheck if its request has already started. */
-       return intel_engine_has_started(engine, wait->seqno);
-}
+       unsigned long flags;
 
-static inline bool chain_wakeup(struct rb_node *rb, int priority)
-{
-       return rb && to_wait(rb)->tsk->prio <= priority;
-}
+       spin_lock_irqsave(&b->irq_lock, flags);
 
-static inline int wakeup_priority(struct intel_breadcrumbs *b,
-                                 struct task_struct *tsk)
-{
-       if (tsk == b->signaler)
-               return INT_MIN;
+       if (b->irq_enabled)
+               irq_enable(engine);
        else
-               return tsk->prio;
-}
-
-static void __intel_engine_remove_wait(struct intel_engine_cs *engine,
-                                      struct intel_wait *wait)
-{
-       struct intel_breadcrumbs *b = &engine->breadcrumbs;
-
-       lockdep_assert_held(&b->rb_lock);
-
-       if (RB_EMPTY_NODE(&wait->node))
-               goto out;
-
-       if (b->irq_wait == wait) {
-               const int priority = wakeup_priority(b, wait->tsk);
-               struct rb_node *next;
-
-               /* We are the current bottom-half. Find the next candidate,
-                * the first waiter in the queue on the remaining oldest
-                * request. As multiple seqnos may complete in the time it
-                * takes us to wake up and find the next waiter, we have to
-                * wake up that waiter for it to perform its own coherent
-                * completion check.
-                */
-               next = rb_next(&wait->node);
-               if (chain_wakeup(next, priority)) {
-                       /* If the next waiter is already complete,
-                        * wake it up and continue onto the next waiter. So
-                        * if have a small herd, they will wake up in parallel
-                        * rather than sequentially, which should reduce
-                        * the overall latency in waking all the completed
-                        * clients.
-                        *
-                        * However, waking up a chain adds extra latency to
-                        * the first_waiter. This is undesirable if that
-                        * waiter is a high priority task.
-                        */
-                       u32 seqno = intel_engine_get_seqno(engine);
-
-                       while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
-                               struct rb_node *n = rb_next(next);
-
-                               __intel_breadcrumbs_finish(b, to_wait(next));
-                               next = n;
-                               if (!chain_wakeup(next, priority))
-                                       break;
-                       }
-               }
-
-               __intel_breadcrumbs_next(engine, next);
-       } else {
-               GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
-       }
-
-       GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
-       rb_erase(&wait->node, &b->waiters);
-       RB_CLEAR_NODE(&wait->node);
+               irq_disable(engine);
 
-out:
-       GEM_BUG_ON(b->irq_wait == wait);
-       GEM_BUG_ON(rb_first(&b->waiters) !=
-                  (b->irq_wait ? &b->irq_wait->node : NULL));
+       spin_unlock_irqrestore(&b->irq_lock, flags);
 }
 
-void intel_engine_remove_wait(struct intel_engine_cs *engine,
-                             struct intel_wait *wait)
+void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
 {
-       struct intel_breadcrumbs *b = &engine->breadcrumbs;
-
-       /* Quick check to see if this waiter was already decoupled from
-        * the tree by the bottom-half to avoid contention on the spinlock
-        * by the herd.
-        */
-       if (RB_EMPTY_NODE(&wait->node)) {
-               GEM_BUG_ON(READ_ONCE(b->irq_wait) == wait);
-               return;
-       }
-
-       spin_lock_irq(&b->rb_lock);
-       __intel_engine_remove_wait(engine, wait);
-       spin_unlock_irq(&b->rb_lock);
 }
 
-static void signaler_set_rtpriority(void)
+bool i915_request_enable_breadcrumb(struct i915_request *rq)
 {
-        struct sched_param param = { .sched_priority = 1 };
-
-        sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
-}
+       struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
 
-static int intel_breadcrumbs_signaler(void *arg)
-{
-       struct intel_engine_cs *engine = arg;
-       struct intel_breadcrumbs *b = &engine->breadcrumbs;
-       struct i915_request *rq, *n;
+       GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
 
-       /* Install ourselves with high priority to reduce signalling latency */
-       signaler_set_rtpriority();
+       if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags))
+               return true;
 
-       do {
-               bool do_schedule = true;
-               LIST_HEAD(list);
-               u32 seqno;
+       spin_lock(&b->irq_lock);
+       if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags) &&
+           !__request_completed(rq)) {
+               struct intel_context *ce = rq->hw_context;
+               struct list_head *pos;
 
-               set_current_state(TASK_INTERRUPTIBLE);
-               if (list_empty(&b->signals))
-                       goto sleep;
+               __intel_breadcrumbs_arm_irq(b);
 
                /*
-                * We are either woken up by the interrupt bottom-half,
-                * or by a client adding a new signaller. In both cases,
-                * the GPU seqno may have advanced beyond our oldest signal.
-                * If it has, propagate the signal, remove the waiter and
-                * check again with the next oldest signal. Otherwise we
-                * need to wait for a new interrupt from the GPU or for
-                * a new client.
+                * We keep the seqno in retirement order, so we can break
+                * inside intel_engine_breadcrumbs_irq as soon as we've passed
+                * the last completed request (or seen a request that hasn't
+                * even started). We could iterate the timeline->requests list,
+                * but keeping a separate signalers_list has the advantage of
+                * hopefully being much smaller than the full list and so
+                * provides faster iteration and detection when there are no
+                * more interrupts required for this context.
+                *
+                * We typically expect to add new signalers in order, so we
+                * start looking for our insertion point from the tail of
+                * the list.
                 */
-               seqno = intel_engine_get_seqno(engine);
-
-               spin_lock_irq(&b->rb_lock);
-               list_for_each_entry_safe(rq, n, &b->signals, signaling.link) {
-                       u32 this = rq->signaling.wait.seqno;
-
-                       GEM_BUG_ON(!rq->signaling.wait.seqno);
-
-                       if (!i915_seqno_passed(seqno, this))
-                               break;
-
-                       if (likely(this == i915_request_global_seqno(rq))) {
-                               __intel_engine_remove_wait(engine,
-                                                          &rq->signaling.wait);
+               list_for_each_prev(pos, &ce->signals) {
+                       struct i915_request *it =
+                               list_entry(pos, typeof(*it), signal_link);
 
-                               rq->signaling.wait.seqno = 0;
-                               __list_del_entry(&rq->signaling.link);
-
-                               if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
-                                             &rq->fence.flags)) {
-                                       list_add_tail(&rq->signaling.link,
-                                                     &list);
-                                       i915_request_get(rq);
-                               }
-                       }
-               }
-               spin_unlock_irq(&b->rb_lock);
-
-               if (!list_empty(&list)) {
-                       local_bh_disable();
-                       list_for_each_entry_safe(rq, n, &list, signaling.link) {
-                               dma_fence_signal(&rq->fence);
-                               GEM_BUG_ON(!i915_request_completed(rq));
-                               i915_request_put(rq);
-                       }
-                       local_bh_enable(); /* kick start the tasklets */
-
-                       /*
-                        * If the engine is saturated we may be continually
-                        * processing completed requests. This angers the
-                        * NMI watchdog if we never let anything else
-                        * have access to the CPU. Let's pretend to be nice
-                        * and relinquish the CPU if we burn through the
-                        * entire RT timeslice!
-                        */
-                       do_schedule = need_resched();
-               }
-
-               if (unlikely(do_schedule)) {
-                       /* Before we sleep, check for a missed seqno */
-                       if (current->state & TASK_NORMAL &&
-                           !list_empty(&b->signals) &&
-                           engine->irq_seqno_barrier &&
-                           test_and_clear_bit(ENGINE_IRQ_BREADCRUMB,
-                                              &engine->irq_posted)) {
-                               engine->irq_seqno_barrier(engine);
-                               intel_engine_wakeup(engine);
-                       }
-
-sleep:
-                       if (kthread_should_park())
-                               kthread_parkme();
-
-                       if (unlikely(kthread_should_stop()))
+                       if (i915_seqno_passed(rq->fence.seqno, it->fence.seqno))
                                break;
-
-                       schedule();
                }
-       } while (1);
-       __set_current_state(TASK_RUNNING);
+               list_add(&rq->signal_link, pos);
+               if (pos == &ce->signals) /* catch transitions from empty list */
+                       list_move_tail(&ce->signal_link, &b->signalers);
 
-       return 0;
-}
-
-static void insert_signal(struct intel_breadcrumbs *b,
-                         struct i915_request *request,
-                         const u32 seqno)
-{
-       struct i915_request *iter;
-
-       lockdep_assert_held(&b->rb_lock);
-
-       /*
-        * A reasonable assumption is that we are called to add signals
-        * in sequence, as the requests are submitted for execution and
-        * assigned a global_seqno. This will be the case for the majority
-        * of internally generated signals (inter-engine signaling).
-        *
-        * Out of order waiters triggering random signaling enabling will
-        * be more problematic, but hopefully rare enough and the list
-        * small enough that the O(N) insertion sort is not an issue.
-        */
-
-       list_for_each_entry_reverse(iter, &b->signals, signaling.link)
-               if (i915_seqno_passed(seqno, iter->signaling.wait.seqno))
-                       break;
-
-       list_add(&request->signaling.link, &iter->signaling.link);
-}
-
-bool intel_engine_enable_signaling(struct i915_request *request, bool wakeup)
-{
-       struct intel_engine_cs *engine = request->engine;
-       struct intel_breadcrumbs *b = &engine->breadcrumbs;
-       struct intel_wait *wait = &request->signaling.wait;
-       u32 seqno;
-
-       /*
-        * Note that we may be called from an interrupt handler on another
-        * device (e.g. nouveau signaling a fence completion causing us
-        * to submit a request, and so enable signaling). As such,
-        * we need to make sure that all other users of b->rb_lock protect
-        * against interrupts, i.e. use spin_lock_irqsave.
-        */
-
-       /* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */
-       GEM_BUG_ON(!irqs_disabled());
-       lockdep_assert_held(&request->lock);
-
-       seqno = i915_request_global_seqno(request);
-       if (!seqno) /* will be enabled later upon execution */
-               return true;
-
-       GEM_BUG_ON(wait->seqno);
-       wait->tsk = b->signaler;
-       wait->request = request;
-       wait->seqno = seqno;
-
-       /*
-        * Add ourselves into the list of waiters, but registering our
-        * bottom-half as the signaller thread. As per usual, only the oldest
-        * waiter (not just signaller) is tasked as the bottom-half waking
-        * up all completed waiters after the user interrupt.
-        *
-        * If we are the oldest waiter, enable the irq (after which we
-        * must double check that the seqno did not complete).
-        */
-       spin_lock(&b->rb_lock);
-       insert_signal(b, request, seqno);
-       wakeup &= __intel_engine_add_wait(engine, wait);
-       spin_unlock(&b->rb_lock);
-
-       if (wakeup) {
-               wake_up_process(b->signaler);
-               return !intel_wait_complete(wait);
+               set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
        }
+       spin_unlock(&b->irq_lock);
 
-       return true;
+       return !__request_completed(rq);
 }
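
Since signalers usually arrive in seqno order, the list_for_each_prev() walk above searches for the insertion point from the tail, making the common in-order append O(1) while degrading gracefully to an O(N) insertion sort for out-of-order enables. The generic shape of that step (names hypothetical):

	struct list_head *pos;

	/* Walk backwards; the first older entry is our predecessor. */
	list_for_each_prev(pos, &head) {
		struct item *it = list_entry(pos, typeof(*it), link);

		if (seqno_passed(ins->seqno, it->seqno))
			break;
	}
	list_add(&ins->link, pos);	/* pos == &head inserts at the front */
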
 
-void intel_engine_cancel_signaling(struct i915_request *request)
+void i915_request_cancel_breadcrumb(struct i915_request *rq)
 {
-       struct intel_engine_cs *engine = request->engine;
-       struct intel_breadcrumbs *b = &engine->breadcrumbs;
+       struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
 
-       GEM_BUG_ON(!irqs_disabled());
-       lockdep_assert_held(&request->lock);
-
-       if (!READ_ONCE(request->signaling.wait.seqno))
+       if (!test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
                return;
 
-       spin_lock(&b->rb_lock);
-       __intel_engine_remove_wait(engine, &request->signaling.wait);
-       if (fetch_and_zero(&request->signaling.wait.seqno))
-               __list_del_entry(&request->signaling.link);
-       spin_unlock(&b->rb_lock);
-}
-
-int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
-{
-       struct intel_breadcrumbs *b = &engine->breadcrumbs;
-       struct task_struct *tsk;
-
-       spin_lock_init(&b->rb_lock);
-       spin_lock_init(&b->irq_lock);
-
-       timer_setup(&b->fake_irq, intel_breadcrumbs_fake_irq, 0);
-       timer_setup(&b->hangcheck, intel_breadcrumbs_hangcheck, 0);
-
-       INIT_LIST_HEAD(&b->signals);
-
-       /* Spawn a thread to provide a common bottom-half for all signals.
-        * As this is an asynchronous interface we cannot steal the current
-        * task for handling the bottom-half to the user interrupt, therefore
-        * we create a thread to do the coherent seqno dance after the
-        * interrupt and then signal the waitqueue (via the dma-buf/fence).
-        */
-       tsk = kthread_run(intel_breadcrumbs_signaler, engine,
-                         "i915/signal:%d", engine->id);
-       if (IS_ERR(tsk))
-               return PTR_ERR(tsk);
-
-       b->signaler = tsk;
-
-       return 0;
-}
-
-static void cancel_fake_irq(struct intel_engine_cs *engine)
-{
-       struct intel_breadcrumbs *b = &engine->breadcrumbs;
-
-       del_timer_sync(&b->fake_irq); /* may queue b->hangcheck */
-       del_timer_sync(&b->hangcheck);
-       clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
-}
-
-void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
-{
-       struct intel_breadcrumbs *b = &engine->breadcrumbs;
-       unsigned long flags;
-
-       spin_lock_irqsave(&b->irq_lock, flags);
-
-       /*
-        * Leave the fake_irq timer enabled (if it is running), but clear the
-        * bit so that it turns itself off on its next wake up and goes back
-        * to the long hangcheck interval if still required.
-        */
-       clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
-
-       if (b->irq_enabled)
-               irq_enable(engine);
-       else
-               irq_disable(engine);
+       spin_lock(&b->irq_lock);
+       if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
+               struct intel_context *ce = rq->hw_context;
 
-       /*
-        * We set the IRQ_BREADCRUMB bit when we enable the irq presuming the
-        * GPU is active and may have already executed the MI_USER_INTERRUPT
-        * before the CPU is ready to receive. However, the engine is currently
-        * idle (we haven't started it yet), there is no possibility for a
-        * missed interrupt as we enabled the irq and so we can clear the
-        * immediate wakeup (until a real interrupt arrives for the waiter).
-        */
-       clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
+               list_del(&rq->signal_link);
+               if (list_empty(&ce->signals))
+                       list_del_init(&ce->signal_link);
 
-       spin_unlock_irqrestore(&b->irq_lock, flags);
+               clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+       }
+       spin_unlock(&b->irq_lock);
 }
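
Note the asymmetry in the removal above: the request drops out with list_del(), but the per-context anchor uses list_del_init() so the detached node stays self-consistent (pointing at itself) and can later be re-linked by the list_move_tail() in i915_request_enable_breadcrumb(). In short:

	list_del(&node->link);		/* pointers poisoned; node is done */
	list_del_init(&anchor->link);	/* re-initialized: empty and reusable */
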
 
-void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
+void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
+                                   struct drm_printer *p)
 {
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
+       struct intel_context *ce;
+       struct i915_request *rq;
 
-       /* The engines should be idle and all requests accounted for! */
-       WARN_ON(READ_ONCE(b->irq_wait));
-       WARN_ON(!RB_EMPTY_ROOT(&b->waiters));
-       WARN_ON(!list_empty(&b->signals));
+       if (list_empty(&b->signalers))
+               return;
 
-       if (!IS_ERR_OR_NULL(b->signaler))
-               kthread_stop(b->signaler);
+       drm_printf(p, "Signals:\n");
 
-       cancel_fake_irq(engine);
+       spin_lock_irq(&b->irq_lock);
+       list_for_each_entry(ce, &b->signalers, signal_link) {
+               list_for_each_entry(rq, &ce->signals, signal_link) {
+                       drm_printf(p, "\t[%llx:%llx%s] @ %dms\n",
+                                  rq->fence.context, rq->fence.seqno,
+                                  i915_request_completed(rq) ? "!" :
+                                  i915_request_started(rq) ? "*" :
+                                  "",
+                                  jiffies_to_msecs(jiffies - rq->emitted_jiffies));
+               }
+       }
+       spin_unlock_irq(&b->irq_lock);
 }
-
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/intel_breadcrumbs.c"
-#endif
index 25e3aba9cded6e45f3751039a3f7bb20845dd7b8..15ba950dee00e789738e4362a7e3050ef9caf1d4 100644 (file)
@@ -218,7 +218,7 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
        };
        const unsigned int *vco_table;
        unsigned int vco;
-       uint8_t tmp = 0;
+       u8 tmp = 0;
 
        /* FIXME other chipsets? */
        if (IS_GM45(dev_priv))
@@ -249,13 +249,13 @@ static void g33_get_cdclk(struct drm_i915_private *dev_priv,
                          struct intel_cdclk_state *cdclk_state)
 {
        struct pci_dev *pdev = dev_priv->drm.pdev;
-       static const uint8_t div_3200[] = { 12, 10,  8,  7, 5, 16 };
-       static const uint8_t div_4000[] = { 14, 12, 10,  8, 6, 20 };
-       static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
-       static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
-       const uint8_t *div_table;
+       static const u8 div_3200[] = { 12, 10,  8,  7, 5, 16 };
+       static const u8 div_4000[] = { 14, 12, 10,  8, 6, 20 };
+       static const u8 div_4800[] = { 20, 14, 12, 10, 8, 24 };
+       static const u8 div_5333[] = { 20, 16, 12, 12, 8, 28 };
+       const u8 *div_table;
        unsigned int cdclk_sel;
-       uint16_t tmp = 0;
+       u16 tmp = 0;
 
        cdclk_state->vco = intel_hpll_vco(dev_priv);
 
@@ -330,12 +330,12 @@ static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
                             struct intel_cdclk_state *cdclk_state)
 {
        struct pci_dev *pdev = dev_priv->drm.pdev;
-       static const uint8_t div_3200[] = { 16, 10,  8 };
-       static const uint8_t div_4000[] = { 20, 12, 10 };
-       static const uint8_t div_5333[] = { 24, 16, 14 };
-       const uint8_t *div_table;
+       static const u8 div_3200[] = { 16, 10,  8 };
+       static const u8 div_4000[] = { 20, 12, 10 };
+       static const u8 div_5333[] = { 24, 16, 14 };
+       const u8 *div_table;
        unsigned int cdclk_sel;
-       uint16_t tmp = 0;
+       u16 tmp = 0;
 
        cdclk_state->vco = intel_hpll_vco(dev_priv);
 
@@ -375,7 +375,7 @@ static void gm45_get_cdclk(struct drm_i915_private *dev_priv,
 {
        struct pci_dev *pdev = dev_priv->drm.pdev;
        unsigned int cdclk_sel;
-       uint16_t tmp = 0;
+       u16 tmp = 0;
 
        cdclk_state->vco = intel_hpll_vco(dev_priv);
 
@@ -403,8 +403,8 @@ static void gm45_get_cdclk(struct drm_i915_private *dev_priv,
 static void hsw_get_cdclk(struct drm_i915_private *dev_priv,
                          struct intel_cdclk_state *cdclk_state)
 {
-       uint32_t lcpll = I915_READ(LCPLL_CTL);
-       uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
+       u32 lcpll = I915_READ(LCPLL_CTL);
+       u32 freq = lcpll & LCPLL_CLK_FREQ_MASK;
 
        if (lcpll & LCPLL_CD_SOURCE_FCLK)
                cdclk_state->cdclk = 800000;
@@ -520,6 +520,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
 {
        int cdclk = cdclk_state->cdclk;
        u32 val, cmd = cdclk_state->voltage_level;
+       intel_wakeref_t wakeref;
 
        switch (cdclk) {
        case 400000:
@@ -539,7 +540,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
         * a system suspend.  So grab the PIPE-A domain, which covers
         * the HW blocks needed for the following programming.
         */
-       intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+       wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 
        mutex_lock(&dev_priv->pcu_lock);
        val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
@@ -593,7 +594,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
 
        vlv_program_pfi_credits(dev_priv);
 
-       intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
+       intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
 }
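
The cdclk conversions show the new tracked-wakeref contract end to end: intel_display_power_get() now returns an intel_wakeref_t cookie, and the matching intel_display_power_put() takes it back so a leaked reference can be attributed to its acquirer (callers that cannot carry the cookie use the _unchecked put seen in the audio hunk earlier). The pattern, exactly as used above:

	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);

	/* ... program the hardware behind the PIPE-A power well ... */

	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
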
 
 static void chv_set_cdclk(struct drm_i915_private *dev_priv,
@@ -601,6 +602,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
 {
        int cdclk = cdclk_state->cdclk;
        u32 val, cmd = cdclk_state->voltage_level;
+       intel_wakeref_t wakeref;
 
        switch (cdclk) {
        case 333333:
@@ -619,7 +621,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
         * a system suspend.  So grab the PIPE-A domain, which covers
         * the HW blocks needed for the following programming.
         */
-       intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+       wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 
        mutex_lock(&dev_priv->pcu_lock);
        val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
@@ -637,7 +639,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
 
        vlv_program_pfi_credits(dev_priv);
 
-       intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
+       intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
 }
 
 static int bdw_calc_cdclk(int min_cdclk)
@@ -670,8 +672,8 @@ static u8 bdw_calc_voltage_level(int cdclk)
 static void bdw_get_cdclk(struct drm_i915_private *dev_priv,
                          struct intel_cdclk_state *cdclk_state)
 {
-       uint32_t lcpll = I915_READ(LCPLL_CTL);
-       uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
+       u32 lcpll = I915_READ(LCPLL_CTL);
+       u32 freq = lcpll & LCPLL_CLK_FREQ_MASK;
 
        if (lcpll & LCPLL_CD_SOURCE_FCLK)
                cdclk_state->cdclk = 800000;
@@ -698,7 +700,7 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
                          const struct intel_cdclk_state *cdclk_state)
 {
        int cdclk = cdclk_state->cdclk;
-       uint32_t val;
+       u32 val;
        int ret;
 
        if (WARN((I915_READ(LCPLL_CTL) &
@@ -1081,7 +1083,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
 
 static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
 {
-       uint32_t cdctl, expected;
+       u32 cdctl, expected;
 
        /*
         * check if the pre-os initialized the display
@@ -2140,7 +2142,7 @@ static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
 {
        if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
                return DIV_ROUND_UP(pixel_rate, 2);
-       else if (IS_GEN9(dev_priv) ||
+       else if (IS_GEN(dev_priv, 9) ||
                 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
                return pixel_rate;
        else if (IS_CHERRYVIEW(dev_priv))
@@ -2176,7 +2178,7 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
                if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) {
                        /* Display WA #1145: glk,cnl */
                        min_cdclk = max(316800, min_cdclk);
-               } else if (IS_GEN9(dev_priv) || IS_BROADWELL(dev_priv)) {
+               } else if (IS_GEN(dev_priv, 9) || IS_BROADWELL(dev_priv)) {
                        /* Display WA #1144: skl,bxt */
                        min_cdclk = max(432000, min_cdclk);
                }
@@ -2537,7 +2539,7 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
 
        if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
                return 2 * max_cdclk_freq;
-       else if (IS_GEN9(dev_priv) ||
+       else if (IS_GEN(dev_priv, 9) ||
                 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
                return max_cdclk_freq;
        else if (IS_CHERRYVIEW(dev_priv))
@@ -2688,7 +2690,7 @@ static int vlv_hrawclk(struct drm_i915_private *dev_priv)
 
 static int g4x_hrawclk(struct drm_i915_private *dev_priv)
 {
-       uint32_t clkcfg;
+       u32 clkcfg;
 
        /* hrawclock is 1/4 the FSB frequency */
        clkcfg = I915_READ(CLKCFG);
@@ -2785,9 +2787,9 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
                dev_priv->display.get_cdclk = hsw_get_cdclk;
        else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                dev_priv->display.get_cdclk = vlv_get_cdclk;
-       else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
+       else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
                dev_priv->display.get_cdclk = fixed_400mhz_get_cdclk;
-       else if (IS_GEN5(dev_priv))
+       else if (IS_GEN(dev_priv, 5))
                dev_priv->display.get_cdclk = fixed_450mhz_get_cdclk;
        else if (IS_GM45(dev_priv))
                dev_priv->display.get_cdclk = gm45_get_cdclk;
index 5127da286a2b4f61ca5a32ce00220e01a9397f93..71a1f12c6b2a5016a5846bacfb6d89b4960e4371 100644 (file)
 #define ILK_CSC_COEFF_1_0              \
        ((7 << 12) | ILK_CSC_COEFF_FP(CTM_COEFF_1_0, 8))
 
-static bool crtc_state_is_legacy_gamma(struct drm_crtc_state *state)
+static bool lut_is_legacy(const struct drm_property_blob *lut)
 {
-       return !state->degamma_lut &&
-               !state->ctm &&
-               state->gamma_lut &&
-               drm_color_lut_size(state->gamma_lut) == LEGACY_LUT_LENGTH;
+       return drm_color_lut_size(lut) == LEGACY_LUT_LENGTH;
+}
+
+static bool crtc_state_is_legacy_gamma(const struct intel_crtc_state *crtc_state)
+{
+       return !crtc_state->base.degamma_lut &&
+               !crtc_state->base.ctm &&
+               crtc_state->base.gamma_lut &&
+               lut_is_legacy(crtc_state->base.gamma_lut);
 }
 
 /*
@@ -108,10 +113,10 @@ static u64 *ctm_mult_by_limited(u64 *result, const u64 *input)
        return result;
 }
 
-static void ilk_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc)
+static void ilk_load_ycbcr_conversion_matrix(struct intel_crtc *crtc)
 {
-       int pipe = intel_crtc->pipe;
-       struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
 
        I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
        I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
@@ -132,29 +137,28 @@ static void ilk_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc)
        I915_WRITE(PIPE_CSC_MODE(pipe), 0);
 }
 
-static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
+static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
 {
-       struct drm_crtc *crtc = crtc_state->crtc;
-       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int i, pipe = intel_crtc->pipe;
-       uint16_t coeffs[9] = { 0, };
-       struct intel_crtc_state *intel_crtc_state = to_intel_crtc_state(crtc_state);
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        bool limited_color_range = false;
+       enum pipe pipe = crtc->pipe;
+       u16 coeffs[9] = {};
+       int i;
 
        /*
         * FIXME if there's a gamma LUT after the CSC, we should
         * do the range compression using the gamma LUT instead.
         */
        if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv))
-               limited_color_range = intel_crtc_state->limited_color_range;
+               limited_color_range = crtc_state->limited_color_range;
 
-       if (intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
-           intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
-               ilk_load_ycbcr_conversion_matrix(intel_crtc);
+       if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+           crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
+               ilk_load_ycbcr_conversion_matrix(crtc);
                return;
-       } else if (crtc_state->ctm) {
-               struct drm_color_ctm *ctm = crtc_state->ctm->data;
+       } else if (crtc_state->base.ctm) {
+               struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
                const u64 *input;
                u64 temp[9];
 
@@ -168,7 +172,7 @@ static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
                 * hardware.
                 */
                for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
-                       uint64_t abs_coeff = ((1ULL << 63) - 1) & input[i];
+                       u64 abs_coeff = ((1ULL << 63) - 1) & input[i];
 
                        /*
                         * Clamp input value to min/max supported by
@@ -230,7 +234,7 @@ static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
        I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
 
        if (INTEL_GEN(dev_priv) > 6) {
-               uint16_t postoff = 0;
+               u16 postoff = 0;
 
                if (limited_color_range)
                        postoff = (16 * (1 << 12) / 255) & 0x1fff;
@@ -241,7 +245,7 @@ static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
 
                I915_WRITE(PIPE_CSC_MODE(pipe), 0);
        } else {
-               uint32_t mode = CSC_MODE_YUV_TO_RGB;
+               u32 mode = CSC_MODE_YUV_TO_RGB;
 
                if (limited_color_range)
                        mode |= CSC_BLACK_SCREEN_OFFSET;
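
As a quick check on the limited-range offset above: 16 * (1 << 12) / 255 = 65536 / 255 = 257 in integer arithmetic, i.e. the 16/255 black-level lift expressed in the 0.12 fixed-point offset field, comfortably inside the 13-bit 0x1fff mask.
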
@@ -253,21 +257,20 @@ static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
 /*
  * Set up the pipe CSC unit on CherryView.
  */
-static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
+static void cherryview_load_csc_matrix(const struct intel_crtc_state *crtc_state)
 {
-       struct drm_crtc *crtc = state->crtc;
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int pipe = to_intel_crtc(crtc)->pipe;
-       uint32_t mode;
-
-       if (state->ctm) {
-               struct drm_color_ctm *ctm = state->ctm->data;
-               uint16_t coeffs[9] = { 0, };
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+       u32 mode;
+
+       if (crtc_state->base.ctm) {
+               const struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
+               u16 coeffs[9] = {};
                int i;
 
                for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
-                       uint64_t abs_coeff =
+                       u64 abs_coeff =
                                ((1ULL << 63) - 1) & ctm->matrix[i];
 
                        /* Round coefficient. */
@@ -293,35 +296,24 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
                I915_WRITE(CGM_PIPE_CSC_COEFF8(pipe), coeffs[8]);
        }
 
-       mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0);
-       if (!crtc_state_is_legacy_gamma(state)) {
-               mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
-                       (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
+       mode = (crtc_state->base.ctm ? CGM_PIPE_MODE_CSC : 0);
+       if (!crtc_state_is_legacy_gamma(crtc_state)) {
+               mode |= (crtc_state->base.degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
+                       (crtc_state->base.gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
        }
        I915_WRITE(CGM_PIPE_MODE(pipe), mode);
 }
 
-void intel_color_set_csc(struct drm_crtc_state *crtc_state)
-{
-       struct drm_device *dev = crtc_state->crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-
-       if (dev_priv->display.load_csc_matrix)
-               dev_priv->display.load_csc_matrix(crtc_state);
-}
-
 /* Loads the legacy palette/gamma unit for the CRTC. */
-static void i9xx_load_luts_internal(struct drm_crtc *crtc,
-                                   struct drm_property_blob *blob,
-                                   struct intel_crtc_state *crtc_state)
+static void i9xx_load_luts_internal(const struct intel_crtc_state *crtc_state,
+                                   const struct drm_property_blob *blob)
 {
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       enum pipe pipe = intel_crtc->pipe;
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
        int i;
 
-       if (HAS_GMCH_DISPLAY(dev_priv)) {
+       if (HAS_GMCH(dev_priv)) {
                if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
                        assert_dsi_pll_enabled(dev_priv);
                else
@@ -329,23 +321,24 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
        }
 
        if (blob) {
-               struct drm_color_lut *lut = blob->data;
+               const struct drm_color_lut *lut = blob->data;
+
                for (i = 0; i < 256; i++) {
-                       uint32_t word =
+                       u32 word =
                                (drm_color_lut_extract(lut[i].red, 8) << 16) |
                                (drm_color_lut_extract(lut[i].green, 8) << 8) |
                                drm_color_lut_extract(lut[i].blue, 8);
 
-                       if (HAS_GMCH_DISPLAY(dev_priv))
+                       if (HAS_GMCH(dev_priv))
                                I915_WRITE(PALETTE(pipe, i), word);
                        else
                                I915_WRITE(LGC_PALETTE(pipe, i), word);
                }
        } else {
                for (i = 0; i < 256; i++) {
-                       uint32_t word = (i << 16) | (i << 8) | i;
+                       u32 word = (i << 16) | (i << 8) | i;
 
-                       if (HAS_GMCH_DISPLAY(dev_priv))
+                       if (HAS_GMCH(dev_priv))
                                I915_WRITE(PALETTE(pipe, i), word);
                        else
                                I915_WRITE(LGC_PALETTE(pipe, i), word);
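
drm_color_lut_extract() reduces each 16-bit LUT channel to the precision of the target palette register with round-to-nearest. A rough userspace model of its behavior, for illustration only (not the kernel helper verbatim):

#include <stdint.h>

/* Approximate model of drm_color_lut_extract(): scale a 16-bit
 * channel value down to bit_precision bits, rounding to nearest
 * and clamping the rounding carry. */
static uint32_t lut_extract(uint32_t user, unsigned int bit_precision)
{
	if (bit_precision >= 16)
		return user;

	user += 1u << (16 - bit_precision - 1); /* round to nearest */
	if (user > 0xffff)
		user = 0xffff;                  /* clamp the carry */
	return user >> (16 - bit_precision);
}
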
@@ -353,56 +346,37 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
        }
 }
 
-static void i9xx_load_luts(struct drm_crtc_state *crtc_state)
+static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
 {
-       i9xx_load_luts_internal(crtc_state->crtc, crtc_state->gamma_lut,
-                               to_intel_crtc_state(crtc_state));
+       i9xx_load_luts_internal(crtc_state, crtc_state->base.gamma_lut);
 }
 
-/* Loads the legacy palette/gamma unit for the CRTC on Haswell. */
-static void haswell_load_luts(struct drm_crtc_state *crtc_state)
+static void hsw_color_commit(const struct intel_crtc_state *crtc_state)
 {
-       struct drm_crtc *crtc = crtc_state->crtc;
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_crtc_state *intel_crtc_state =
-               to_intel_crtc_state(crtc_state);
-       bool reenable_ips = false;
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
-       /*
-        * Workaround : Do not read or write the pipe palette/gamma data while
-        * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
-        */
-       if (IS_HASWELL(dev_priv) && intel_crtc_state->ips_enabled &&
-           (intel_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)) {
-               hsw_disable_ips(intel_crtc_state);
-               reenable_ips = true;
-       }
+       I915_WRITE(GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode);
 
-       intel_crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
-       I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
-
-       i9xx_load_luts(crtc_state);
-
-       if (reenable_ips)
-               hsw_enable_ips(intel_crtc_state);
+       ilk_load_csc_matrix(crtc_state);
 }
 
-static void bdw_load_degamma_lut(struct drm_crtc_state *state)
+static void bdw_load_degamma_lut(const struct intel_crtc_state *crtc_state)
 {
-       struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
-       enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
-       uint32_t i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+       u32 i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+       enum pipe pipe = crtc->pipe;
 
        I915_WRITE(PREC_PAL_INDEX(pipe),
                   PAL_PREC_SPLIT_MODE | PAL_PREC_AUTO_INCREMENT);
 
-       if (state->degamma_lut) {
-               struct drm_color_lut *lut = state->degamma_lut->data;
+       if (degamma_lut) {
+               const struct drm_color_lut *lut = degamma_lut->data;
 
                for (i = 0; i < lut_size; i++) {
-                       uint32_t word =
+                       u32 word =
                        drm_color_lut_extract(lut[i].red, 10) << 20 |
                        drm_color_lut_extract(lut[i].green, 10) << 10 |
                        drm_color_lut_extract(lut[i].blue, 10);
@@ -411,7 +385,7 @@ static void bdw_load_degamma_lut(struct drm_crtc_state *state)
                }
        } else {
                for (i = 0; i < lut_size; i++) {
-                       uint32_t v = (i * ((1 << 10) - 1)) / (lut_size - 1);
+                       u32 v = (i * ((1 << 10) - 1)) / (lut_size - 1);
 
                        I915_WRITE(PREC_PAL_DATA(pipe),
                                   (v << 20) | (v << 10) | v);
@@ -419,11 +393,13 @@ static void bdw_load_degamma_lut(struct drm_crtc_state *state)
        }
 }
 
-static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
+static void bdw_load_gamma_lut(const struct intel_crtc_state *crtc_state, u32 offset)
 {
-       struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
-       enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
-       uint32_t i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+       u32 i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+       enum pipe pipe = crtc->pipe;
 
        WARN_ON(offset & ~PAL_PREC_INDEX_VALUE_MASK);
 
@@ -432,11 +408,11 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
                   PAL_PREC_AUTO_INCREMENT |
                   offset);
 
-       if (state->gamma_lut) {
-               struct drm_color_lut *lut = state->gamma_lut->data;
+       if (gamma_lut) {
+               const struct drm_color_lut *lut = gamma_lut->data;
 
                for (i = 0; i < lut_size; i++) {
-                       uint32_t word =
+                       u32 word =
                        (drm_color_lut_extract(lut[i].red, 10) << 20) |
                        (drm_color_lut_extract(lut[i].green, 10) << 10) |
                        drm_color_lut_extract(lut[i].blue, 10);
@@ -454,7 +430,7 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
                           drm_color_lut_extract(lut[i].blue, 16));
        } else {
                for (i = 0; i < lut_size; i++) {
-                       uint32_t v = (i * ((1 << 10) - 1)) / (lut_size - 1);
+                       u32 v = (i * ((1 << 10) - 1)) / (lut_size - 1);
 
                        I915_WRITE(PREC_PAL_DATA(pipe),
                                   (v << 20) | (v << 10) | v);
@@ -467,38 +443,34 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
 }
 
 /* Loads the palette/gamma unit for the CRTC on Broadwell+. */
-static void broadwell_load_luts(struct drm_crtc_state *state)
+static void broadwell_load_luts(const struct intel_crtc_state *crtc_state)
 {
-       struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
-       struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
-       enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
-
-       if (crtc_state_is_legacy_gamma(state)) {
-               haswell_load_luts(state);
-               return;
-       }
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
 
-       bdw_load_degamma_lut(state);
-       bdw_load_gamma_lut(state,
-                          INTEL_INFO(dev_priv)->color.degamma_lut_size);
-
-       intel_state->gamma_mode = GAMMA_MODE_MODE_SPLIT;
-       I915_WRITE(GAMMA_MODE(pipe), GAMMA_MODE_MODE_SPLIT);
-       POSTING_READ(GAMMA_MODE(pipe));
+       if (crtc_state_is_legacy_gamma(crtc_state)) {
+               i9xx_load_luts(crtc_state);
+       } else {
+               bdw_load_degamma_lut(crtc_state);
+               bdw_load_gamma_lut(crtc_state,
+                                  INTEL_INFO(dev_priv)->color.degamma_lut_size);
 
-       /*
-        * Reset the index, otherwise it prevents the legacy palette to be
-        * written properly.
-        */
-       I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+               /*
+                * Reset the index, otherwise it prevents the legacy palette
+                * from being written properly.
+                */
+               I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+       }
 }
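
The branches above and below key off crtc_state_is_legacy_gamma(), which no hunk in this diff shows. From the rest of the file it amounts to roughly the following: "legacy" means a bare 256-entry gamma LUT with no degamma LUT and no CTM attached. Sketch only, reconstructed rather than quoted:

/* Rough shape of the helper these paths branch on: */
static bool crtc_state_is_legacy_gamma(const struct intel_crtc_state *crtc_state)
{
	return !crtc_state->base.degamma_lut &&
	       !crtc_state->base.ctm &&
	       crtc_state->base.gamma_lut &&
	       drm_color_lut_size(crtc_state->base.gamma_lut) == 256;
}
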
 
-static void glk_load_degamma_lut(struct drm_crtc_state *state)
+static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
 {
-       struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
-       enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
-       const uint32_t lut_size = 33;
-       uint32_t i;
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+       const u32 lut_size = 33;
+       u32 i;
 
        /*
         * When setting the auto-increment bit, the hardware seems to
@@ -513,7 +485,7 @@ static void glk_load_degamma_lut(struct drm_crtc_state *state)
         *  different values per channel, so this just loads a linear table.
         */
        for (i = 0; i < lut_size; i++) {
-               uint32_t v = (i * (1 << 16)) / (lut_size - 1);
+               u32 v = (i * (1 << 16)) / (lut_size - 1);
 
                I915_WRITE(PRE_CSC_GAMC_DATA(pipe), v);
        }
@@ -523,51 +495,49 @@ static void glk_load_degamma_lut(struct drm_crtc_state *state)
                I915_WRITE(PRE_CSC_GAMC_DATA(pipe), (1 << 16));
 }
 
-static void glk_load_luts(struct drm_crtc_state *state)
+static void glk_load_luts(const struct intel_crtc_state *crtc_state)
 {
-       struct drm_crtc *crtc = state->crtc;
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
-       enum pipe pipe = to_intel_crtc(crtc)->pipe;
-
-       glk_load_degamma_lut(state);
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
 
-       if (crtc_state_is_legacy_gamma(state)) {
-               haswell_load_luts(state);
-               return;
-       }
+       glk_load_degamma_lut(crtc_state);
 
-       bdw_load_gamma_lut(state, 0);
+       if (crtc_state_is_legacy_gamma(crtc_state)) {
+               i9xx_load_luts(crtc_state);
+       } else {
+               bdw_load_gamma_lut(crtc_state, 0);
 
-       intel_state->gamma_mode = GAMMA_MODE_MODE_10BIT;
-       I915_WRITE(GAMMA_MODE(pipe), GAMMA_MODE_MODE_10BIT);
-       POSTING_READ(GAMMA_MODE(pipe));
+               /*
+                * Reset the index, otherwise it prevents the legacy palette
+                * from being written properly.
+                */
+               I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+       }
 }
 
-/* Loads the palette/gamma unit for the CRTC on CherryView. */
-static void cherryview_load_luts(struct drm_crtc_state *state)
+static void cherryview_load_luts(const struct intel_crtc_state *crtc_state)
 {
-       struct drm_crtc *crtc = state->crtc;
-       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-       enum pipe pipe = to_intel_crtc(crtc)->pipe;
-       struct drm_color_lut *lut;
-       uint32_t i, lut_size;
-       uint32_t word0, word1;
-
-       if (crtc_state_is_legacy_gamma(state)) {
-               /* Turn off degamma/gamma on CGM block. */
-               I915_WRITE(CGM_PIPE_MODE(pipe),
-                          (state->ctm ? CGM_PIPE_MODE_CSC : 0));
-               i9xx_load_luts_internal(crtc, state->gamma_lut,
-                                       to_intel_crtc_state(state));
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+       const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+       enum pipe pipe = crtc->pipe;
+
+       cherryview_load_csc_matrix(crtc_state);
+
+       if (crtc_state_is_legacy_gamma(crtc_state)) {
+               i9xx_load_luts_internal(crtc_state, gamma_lut);
                return;
        }
 
-       if (state->degamma_lut) {
-               lut = state->degamma_lut->data;
-               lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+       if (degamma_lut) {
+               const struct drm_color_lut *lut = degamma_lut->data;
+               int i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+
                for (i = 0; i < lut_size; i++) {
+                       u32 word0, word1;
+
                        /* Write LUT in U0.14 format. */
                        word0 =
                        (drm_color_lut_extract(lut[i].green, 14) << 16) |
@@ -579,10 +549,13 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
                }
        }
 
-       if (state->gamma_lut) {
-               lut = state->gamma_lut->data;
-               lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+       if (gamma_lut) {
+               const struct drm_color_lut *lut = gamma_lut->data;
+               int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+
                for (i = 0; i < lut_size; i++) {
+                       u32 word0, word1;
+
                        /* Write LUT in U0.10 format. */
                        word0 =
                        (drm_color_lut_extract(lut[i].green, 10) << 16) |
@@ -594,74 +567,100 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
                }
        }
 
-       I915_WRITE(CGM_PIPE_MODE(pipe),
-                  (state->ctm ? CGM_PIPE_MODE_CSC : 0) |
-                  (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
-                  (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0));
-
        /*
         * Also program a linear LUT in the legacy block (behind the
         * CGM block).
         */
-       i9xx_load_luts_internal(crtc, NULL, to_intel_crtc_state(state));
+       i9xx_load_luts_internal(crtc_state, NULL);
 }
 
-void intel_color_load_luts(struct drm_crtc_state *crtc_state)
+void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
 {
-       struct drm_device *dev = crtc_state->crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
 
        dev_priv->display.load_luts(crtc_state);
 }
 
-int intel_color_check(struct drm_crtc *crtc,
-                     struct drm_crtc_state *crtc_state)
+void intel_color_commit(const struct intel_crtc_state *crtc_state)
 {
-       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-       size_t gamma_length, degamma_length;
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+       if (dev_priv->display.color_commit)
+               dev_priv->display.color_commit(crtc_state);
+}
+
+static int check_lut_size(const struct drm_property_blob *lut, int expected)
+{
+       int len;
+
+       if (!lut)
+               return 0;
+
+       len = drm_color_lut_size(lut);
+       if (len != expected) {
+               DRM_DEBUG_KMS("Invalid LUT size; got %d, expected %d\n",
+                             len, expected);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int intel_color_check(struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+       const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+       const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+       int gamma_length, degamma_length;
+       u32 gamma_tests, degamma_tests;
 
        degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size;
        gamma_length = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+       degamma_tests = INTEL_INFO(dev_priv)->color.degamma_lut_tests;
+       gamma_tests = INTEL_INFO(dev_priv)->color.gamma_lut_tests;
 
-       /*
-        * We allow both degamma & gamma luts at the right size or
-        * NULL.
-        */
-       if ((!crtc_state->degamma_lut ||
-            drm_color_lut_size(crtc_state->degamma_lut) == degamma_length) &&
-           (!crtc_state->gamma_lut ||
-            drm_color_lut_size(crtc_state->gamma_lut) == gamma_length))
+       /* Always allow legacy gamma LUT with no further checking. */
+       if (crtc_state_is_legacy_gamma(crtc_state)) {
+               crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
                return 0;
+       }
 
-       /*
-        * We also allow no degamma lut/ctm and a gamma lut at the legacy
-        * size (256 entries).
-        */
-       if (crtc_state_is_legacy_gamma(crtc_state))
-               return 0;
+       if (check_lut_size(degamma_lut, degamma_length) ||
+           check_lut_size(gamma_lut, gamma_length))
+               return -EINVAL;
 
-       return -EINVAL;
+       if (drm_color_lut_check(degamma_lut, degamma_tests) ||
+           drm_color_lut_check(gamma_lut, gamma_tests))
+               return -EINVAL;
+
+       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+               crtc_state->gamma_mode = GAMMA_MODE_MODE_10BIT;
+       else if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+               crtc_state->gamma_mode = GAMMA_MODE_MODE_SPLIT;
+       else
+               crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
+
+       return 0;
 }
 
-void intel_color_init(struct drm_crtc *crtc)
+void intel_color_init(struct intel_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
-       drm_mode_crtc_set_gamma_size(crtc, 256);
+       drm_mode_crtc_set_gamma_size(&crtc->base, 256);
 
        if (IS_CHERRYVIEW(dev_priv)) {
-               dev_priv->display.load_csc_matrix = cherryview_load_csc_matrix;
                dev_priv->display.load_luts = cherryview_load_luts;
        } else if (IS_HASWELL(dev_priv)) {
-               dev_priv->display.load_csc_matrix = ilk_load_csc_matrix;
-               dev_priv->display.load_luts = haswell_load_luts;
+               dev_priv->display.load_luts = i9xx_load_luts;
+               dev_priv->display.color_commit = hsw_color_commit;
        } else if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv) ||
                   IS_BROXTON(dev_priv)) {
-               dev_priv->display.load_csc_matrix = ilk_load_csc_matrix;
                dev_priv->display.load_luts = broadwell_load_luts;
+               dev_priv->display.color_commit = hsw_color_commit;
        } else if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
-               dev_priv->display.load_csc_matrix = ilk_load_csc_matrix;
                dev_priv->display.load_luts = glk_load_luts;
+               dev_priv->display.color_commit = hsw_color_commit;
        } else {
                dev_priv->display.load_luts = i9xx_load_luts;
        }
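
The vtable rework above separates deciding color state from writing it: intel_color_check() validates the LUTs and chooses gamma_mode during atomic check, the new color_commit hook writes GAMMA_MODE and the CSC, and load_luts programs the palette data. Sketched call ordering, assuming the usual atomic flow (the real call sites live elsewhere in i915):

/* Illustrative ordering only: */
ret = intel_color_check(crtc_state);  /* validate LUTs, pick gamma_mode */
if (ret)
	return ret;
/* ... later, while committing hardware state for this crtc ... */
intel_color_commit(crtc_state);       /* write GAMMA_MODE + CSC */
intel_color_load_luts(crtc_state);    /* program (de)gamma palette data */
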
@@ -669,7 +668,7 @@ void intel_color_init(struct drm_crtc *crtc)
        /* Enable color management support when we have degamma & gamma LUTs. */
        if (INTEL_INFO(dev_priv)->color.degamma_lut_size != 0 &&
            INTEL_INFO(dev_priv)->color.gamma_lut_size != 0)
-               drm_crtc_enable_color_mgmt(crtc,
+               drm_crtc_enable_color_mgmt(&crtc->base,
                                           INTEL_INFO(dev_priv)->color.degamma_lut_size,
                                           true,
                                           INTEL_INFO(dev_priv)->color.gamma_lut_size);
index 18e370f607bcc2e50105c5854b70d61611c22709..ee16758747c5d1af85d3251442993edf85eb0685 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/i2c.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_edid.h>
-#include <drm/drmP.h>
 #include "intel_drv.h"
 #include "i915_drv.h"
 
@@ -95,6 +94,10 @@ void intel_connector_destroy(struct drm_connector *connector)
        intel_panel_fini(&intel_connector->panel);
 
        drm_connector_cleanup(connector);
+
+       if (intel_connector->port)
+               drm_dp_mst_put_port_malloc(intel_connector->port);
+
        kfree(connector);
 }
 
index 68f2fb89ece3fa259bfb2da593a7a66c68c81021..3716b2ee362fde71b3d908ba489e91c569c9a6b3 100644 (file)
 #include <linux/dmi.h>
 #include <linux/i2c.h>
 #include <linux/slab.h>
-#include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
+#include <drm/drm_probe_helper.h>
 #include "intel_drv.h"
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
@@ -84,15 +83,17 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crt *crt = intel_encoder_to_crt(encoder);
+       intel_wakeref_t wakeref;
        bool ret;
 
-       if (!intel_display_power_get_if_enabled(dev_priv,
-                                               encoder->power_domain))
+       wakeref = intel_display_power_get_if_enabled(dev_priv,
+                                                    encoder->power_domain);
+       if (!wakeref)
                return false;
 
        ret = intel_crt_port_enabled(dev_priv, crt->adpa_reg, pipe);
 
-       intel_display_power_put(dev_priv, encoder->power_domain);
+       intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
 
        return ret;
 }
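
This is the first of many identical conversions in this series: display power references become tracked intel_wakeref_t cookies, so the put must name exactly the reference it releases instead of decrementing an anonymous count. The recurring shape, extracted from the hunk above:

intel_wakeref_t wakeref;

wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (!wakeref)
	return false;                 /* power well down; skip the MMIO */

/* ... touch hardware while the reference is held ... */

intel_display_power_put(dev_priv, power_domain, wakeref);
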
@@ -322,7 +323,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
                 * DAC limit supposedly 355 MHz.
                 */
                max_clock = 270000;
-       else if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv))
+       else if (IS_GEN_RANGE(dev_priv, 3, 4))
                max_clock = 400000;
        else
                max_clock = 350000;
@@ -344,51 +345,52 @@ intel_crt_mode_valid(struct drm_connector *connector,
        return MODE_OK;
 }
 
-static bool intel_crt_compute_config(struct intel_encoder *encoder,
-                                    struct intel_crtc_state *pipe_config,
-                                    struct drm_connector_state *conn_state)
+static int intel_crt_compute_config(struct intel_encoder *encoder,
+                                   struct intel_crtc_state *pipe_config,
+                                   struct drm_connector_state *conn_state)
 {
        struct drm_display_mode *adjusted_mode =
                &pipe_config->base.adjusted_mode;
 
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
-               return false;
+               return -EINVAL;
 
        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
-       return true;
+
+       return 0;
 }
 
-static bool pch_crt_compute_config(struct intel_encoder *encoder,
-                                  struct intel_crtc_state *pipe_config,
-                                  struct drm_connector_state *conn_state)
+static int pch_crt_compute_config(struct intel_encoder *encoder,
+                                 struct intel_crtc_state *pipe_config,
+                                 struct drm_connector_state *conn_state)
 {
        struct drm_display_mode *adjusted_mode =
                &pipe_config->base.adjusted_mode;
 
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
-               return false;
+               return -EINVAL;
 
        pipe_config->has_pch_encoder = true;
        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
 
-       return true;
+       return 0;
 }
 
-static bool hsw_crt_compute_config(struct intel_encoder *encoder,
-                                  struct intel_crtc_state *pipe_config,
-                                  struct drm_connector_state *conn_state)
+static int hsw_crt_compute_config(struct intel_encoder *encoder,
+                                 struct intel_crtc_state *pipe_config,
+                                 struct drm_connector_state *conn_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct drm_display_mode *adjusted_mode =
                &pipe_config->base.adjusted_mode;
 
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
-               return false;
+               return -EINVAL;
 
        /* HSW/BDW FDI limited to 4k */
        if (adjusted_mode->crtc_hdisplay > 4096 ||
            adjusted_mode->crtc_hblank_start > 4096)
-               return false;
+               return -EINVAL;
 
        pipe_config->has_pch_encoder = true;
        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
@@ -397,7 +399,7 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder,
        if (HAS_PCH_LPT(dev_priv)) {
                if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
                        DRM_DEBUG_KMS("LPT only supports 24bpp\n");
-                       return false;
+                       return -EINVAL;
                }
 
                pipe_config->pipe_bpp = 24;
@@ -406,7 +408,7 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder,
        /* FDI must always be 2.7 GHz */
        pipe_config->port_clock = 135000 * 2;
 
-       return true;
+       return 0;
 }
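
All three CRT compute_config hooks now return an int errno instead of a bool, letting callers propagate the specific failure rather than collapsing everything into "mode rejected". The caller-side convention, sketched (the call site is outside this diff):

/* Hypothetical caller matching the new int-returning signature: */
int ret = encoder->compute_config(encoder, pipe_config, conn_state);
if (ret) /* e.g. -EINVAL for DBLSCAN or the HSW/BDW FDI limits above */
	return ret;
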
 
 static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
@@ -629,19 +631,19 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
 }
 
 static enum drm_connector_status
-intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
+intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
 {
        struct drm_device *dev = crt->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
-       uint32_t save_bclrpat;
-       uint32_t save_vtotal;
-       uint32_t vtotal, vactive;
-       uint32_t vsample;
-       uint32_t vblank, vblank_start, vblank_end;
-       uint32_t dsl;
+       u32 save_bclrpat;
+       u32 save_vtotal;
+       u32 vtotal, vactive;
+       u32 vsample;
+       u32 vblank, vblank_start, vblank_end;
+       u32 dsl;
        i915_reg_t bclrpat_reg, vtotal_reg,
                vblank_reg, vsync_reg, pipeconf_reg, pipe_dsl_reg;
-       uint8_t st00;
+       u8 st00;
        enum drm_connector_status status;
 
        DRM_DEBUG_KMS("starting load-detect on CRT\n");
@@ -666,8 +668,8 @@ intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
        /* Set the border color to purple. */
        I915_WRITE(bclrpat_reg, 0x500050);
 
-       if (!IS_GEN2(dev_priv)) {
-               uint32_t pipeconf = I915_READ(pipeconf_reg);
+       if (!IS_GEN(dev_priv, 2)) {
+               u32 pipeconf = I915_READ(pipeconf_reg);
                I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
                POSTING_READ(pipeconf_reg);
                /* Wait for next Vblank to substitute
@@ -688,8 +690,8 @@ intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
                * Yes, this will flicker
                */
                if (vblank_start <= vactive && vblank_end >= vtotal) {
-                       uint32_t vsync = I915_READ(vsync_reg);
-                       uint32_t vsync_start = (vsync & 0xffff) + 1;
+                       u32 vsync = I915_READ(vsync_reg);
+                       u32 vsync_start = (vsync & 0xffff) + 1;
 
                        vblank_start = vsync_start;
                        I915_WRITE(vblank_reg,
@@ -777,6 +779,7 @@ intel_crt_detect(struct drm_connector *connector,
        struct drm_i915_private *dev_priv = to_i915(connector->dev);
        struct intel_crt *crt = intel_attached_crt(connector);
        struct intel_encoder *intel_encoder = &crt->base;
+       intel_wakeref_t wakeref;
        int status, ret;
        struct intel_load_detect_pipe tmp;
 
@@ -785,7 +788,8 @@ intel_crt_detect(struct drm_connector *connector,
                      force);
 
        if (i915_modparams.load_detect_test) {
-               intel_display_power_get(dev_priv, intel_encoder->power_domain);
+               wakeref = intel_display_power_get(dev_priv,
+                                                 intel_encoder->power_domain);
                goto load_detect;
        }
 
@@ -793,7 +797,8 @@ intel_crt_detect(struct drm_connector *connector,
        if (dmi_check_system(intel_spurious_crt_detect))
                return connector_status_disconnected;
 
-       intel_display_power_get(dev_priv, intel_encoder->power_domain);
+       wakeref = intel_display_power_get(dev_priv,
+                                         intel_encoder->power_domain);
 
        if (I915_HAS_HOTPLUG(dev_priv)) {
                /* We cannot rely on the HPD pin always being correctly wired
@@ -848,7 +853,7 @@ load_detect:
        }
 
 out:
-       intel_display_power_put(dev_priv, intel_encoder->power_domain);
+       intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
        return status;
 }
 
@@ -858,10 +863,12 @@ static int intel_crt_get_modes(struct drm_connector *connector)
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crt *crt = intel_attached_crt(connector);
        struct intel_encoder *intel_encoder = &crt->base;
-       int ret;
+       intel_wakeref_t wakeref;
        struct i2c_adapter *i2c;
+       int ret;
 
-       intel_display_power_get(dev_priv, intel_encoder->power_domain);
+       wakeref = intel_display_power_get(dev_priv,
+                                         intel_encoder->power_domain);
 
        i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
        ret = intel_crt_ddc_get_modes(connector, i2c);
@@ -873,7 +880,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
        ret = intel_crt_ddc_get_modes(connector, i2c);
 
 out:
-       intel_display_power_put(dev_priv, intel_encoder->power_domain);
+       intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
 
        return ret;
 }
@@ -981,7 +988,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
        else
                crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 
-       if (IS_GEN2(dev_priv))
+       if (IS_GEN(dev_priv, 2))
                connector->interlace_allowed = 0;
        else
                connector->interlace_allowed = 1;
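
The IS_GEN2()/IS_GEN3() family gives way to parameterized predicates here. The real macros in i915_drv.h are implemented with a precomputed gen_mask; their plain-C meaning is only this:

/* Semantics sketch, not the mask-based kernel implementation: */
static bool is_gen(int gen, int n)
{
	return gen == n;                     /* IS_GEN(dev_priv, n) */
}

static bool is_gen_range(int gen, int from, int until)
{
	return gen >= from && gen <= until;  /* IS_GEN_RANGE(), inclusive */
}
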
index a516697bf57dfc8d63434b95d3f15aaaff748c21..e8ac04c33e290738cf1e54edf1d4c0ddb1b234dc 100644 (file)
@@ -70,50 +70,50 @@ MODULE_FIRMWARE(BXT_CSR_PATH);
 
 struct intel_css_header {
        /* 0x09 for DMC */
-       uint32_t module_type;
+       u32 module_type;
 
        /* Includes the DMC specific header in dwords */
-       uint32_t header_len;
+       u32 header_len;
 
        /* the value is always 0x10000 */
-       uint32_t header_ver;
+       u32 header_ver;
 
        /* Not used */
-       uint32_t module_id;
+       u32 module_id;
 
        /* Not used */
-       uint32_t module_vendor;
+       u32 module_vendor;
 
        /* in YYYYMMDD format */
-       uint32_t date;
+       u32 date;
 
        /* Size in dwords (CSS_Headerlen + PackageHeaderLen + dmc FWsLen)/4 */
-       uint32_t size;
+       u32 size;
 
        /* Not used */
-       uint32_t key_size;
+       u32 key_size;
 
        /* Not used */
-       uint32_t modulus_size;
+       u32 modulus_size;
 
        /* Not used */
-       uint32_t exponent_size;
+       u32 exponent_size;
 
        /* Not used */
-       uint32_t reserved1[12];
+       u32 reserved1[12];
 
        /* Major Minor */
-       uint32_t version;
+       u32 version;
 
        /* Not used */
-       uint32_t reserved2[8];
+       u32 reserved2[8];
 
        /* Not used */
-       uint32_t kernel_header_info;
+       u32 kernel_header_info;
 } __packed;
 
 struct intel_fw_info {
-       uint16_t reserved1;
+       u16 reserved1;
 
        /* Stepping (A, B, C, ..., *). * is a wildcard */
        char stepping;
@@ -121,8 +121,8 @@ struct intel_fw_info {
        /* Sub-stepping (0, 1, ..., *). * is a wildcard */
        char substepping;
 
-       uint32_t offset;
-       uint32_t reserved2;
+       u32 offset;
+       u32 reserved2;
 } __packed;
 
 struct intel_package_header {
@@ -135,14 +135,14 @@ struct intel_package_header {
        unsigned char reserved[10];
 
        /* Number of valid entries in the FWInfo array below */
-       uint32_t num_entries;
+       u32 num_entries;
 
        struct intel_fw_info fw_info[20];
 } __packed;
 
 struct intel_dmc_header {
        /* the value is always 0x40403E3E */
-       uint32_t signature;
+       u32 signature;
 
        /* DMC binary header length */
        unsigned char header_len;
@@ -151,30 +151,30 @@ struct intel_dmc_header {
        unsigned char header_ver;
 
        /* Reserved */
-       uint16_t dmcc_ver;
+       u16 dmcc_ver;
 
        /* Major, Minor */
-       uint32_t        project;
+       u32 project;
 
        /* Firmware program size (excluding header) in dwords */
-       uint32_t        fw_size;
+       u32 fw_size;
 
        /* Major Minor version */
-       uint32_t fw_version;
+       u32 fw_version;
 
        /* Number of valid MMIO cycles present. */
-       uint32_t mmio_count;
+       u32 mmio_count;
 
        /* MMIO address */
-       uint32_t mmioaddr[8];
+       u32 mmioaddr[8];
 
        /* MMIO data */
-       uint32_t mmiodata[8];
+       u32 mmiodata[8];
 
        /* FW filename  */
        unsigned char dfile[32];
 
-       uint32_t reserved1[2];
+       u32 reserved1[2];
 } __packed;
 
 struct stepping_info {
@@ -230,7 +230,7 @@ intel_get_stepping_info(struct drm_i915_private *dev_priv)
 
 static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
 {
-       uint32_t val, mask;
+       u32 val, mask;
 
        mask = DC_STATE_DEBUG_MASK_MEMORY_UP;
 
@@ -257,7 +257,7 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
 void intel_csr_load_program(struct drm_i915_private *dev_priv)
 {
        u32 *payload = dev_priv->csr.dmc_payload;
-       uint32_t i, fw_size;
+       u32 i, fw_size;
 
        if (!HAS_CSR(dev_priv)) {
                DRM_ERROR("No CSR support available for this platform\n");
@@ -289,17 +289,17 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
        gen9_set_dc_state_debugmask(dev_priv);
 }
 
-static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
-                             const struct firmware *fw)
+static u32 *parse_csr_fw(struct drm_i915_private *dev_priv,
+                        const struct firmware *fw)
 {
        struct intel_css_header *css_header;
        struct intel_package_header *package_header;
        struct intel_dmc_header *dmc_header;
        struct intel_csr *csr = &dev_priv->csr;
        const struct stepping_info *si = intel_get_stepping_info(dev_priv);
-       uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
-       uint32_t i;
-       uint32_t *dmc_payload;
+       u32 dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
+       u32 i;
+       u32 *dmc_payload;
 
        if (!fw)
                return NULL;
@@ -409,6 +409,21 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
        return memcpy(dmc_payload, &fw->data[readcount], nbytes);
 }
 
+static void intel_csr_runtime_pm_get(struct drm_i915_private *dev_priv)
+{
+       WARN_ON(dev_priv->csr.wakeref);
+       dev_priv->csr.wakeref =
+               intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+}
+
+static void intel_csr_runtime_pm_put(struct drm_i915_private *dev_priv)
+{
+       intel_wakeref_t wakeref __maybe_unused =
+               fetch_and_zero(&dev_priv->csr.wakeref);
+
+       intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
+}
+
 static void csr_load_work_fn(struct work_struct *work)
 {
        struct drm_i915_private *dev_priv;
@@ -424,8 +439,7 @@ static void csr_load_work_fn(struct work_struct *work)
 
        if (dev_priv->csr.dmc_payload) {
                intel_csr_load_program(dev_priv);
-
-               intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+               intel_csr_runtime_pm_put(dev_priv);
 
                DRM_INFO("Finished loading DMC firmware %s (v%u.%u)\n",
                         dev_priv->csr.fw_path,
@@ -467,7 +481,7 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
         * suspend as runtime suspend *requires* a working CSR for whatever
         * reason.
         */
-       intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+       intel_csr_runtime_pm_get(dev_priv);
 
        if (INTEL_GEN(dev_priv) >= 12) {
                /* Allow to load fw via parameter using the last known size */
@@ -538,7 +552,7 @@ void intel_csr_ucode_suspend(struct drm_i915_private *dev_priv)
 
        /* Drop the reference held in case DMC isn't loaded. */
        if (!dev_priv->csr.dmc_payload)
-               intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+               intel_csr_runtime_pm_put(dev_priv);
 }
 
 /**
@@ -558,7 +572,7 @@ void intel_csr_ucode_resume(struct drm_i915_private *dev_priv)
         * loaded.
         */
        if (!dev_priv->csr.dmc_payload)
-               intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+               intel_csr_runtime_pm_get(dev_priv);
 }
 
 /**
@@ -574,6 +588,7 @@ void intel_csr_ucode_fini(struct drm_i915_private *dev_priv)
                return;
 
        intel_csr_ucode_suspend(dev_priv);
+       WARN_ON(dev_priv->csr.wakeref);
 
        kfree(dev_priv->csr.dmc_payload);
 }
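
intel_csr_runtime_pm_put() hands its stored wakeref over with fetch_and_zero(), clearing the slot in the same expression so a leak or double put trips the WARN_ONs above. The helper is an existing i915 utility; roughly:

/* Roughly the i915_utils.h helper used above: return the current
 * value of a variable and zero it in one expression. */
#define fetch_and_zero(ptr) ({			\
	typeof(*(ptr)) __val = *(ptr);		\
	*(ptr) = (typeof(*(ptr)))0;		\
	__val;					\
})
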
index f3e1d6a0b7dda7cabb0cc882e40aecbd83a1501c..ca705546a0abe7380ebfa87f1a21a4da4a9ed133 100644 (file)
@@ -494,103 +494,58 @@ static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = {
        { 0x2, 0x7F, 0x3F, 0x00, 0x00 },        /* 400   400      0.0   */
 };
 
-struct icl_combo_phy_ddi_buf_trans {
-       u32 dw2_swing_select;
-       u32 dw2_swing_scalar;
-       u32 dw4_scaling;
-};
-
-/* Voltage Swing Programming for VccIO 0.85V for DP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_85V[] = {
-                               /* Voltage mV  db    */
-       { 0x2, 0x98, 0x0018 },  /* 400         0.0   */
-       { 0x2, 0x98, 0x3015 },  /* 400         3.5   */
-       { 0x2, 0x98, 0x6012 },  /* 400         6.0   */
-       { 0x2, 0x98, 0x900F },  /* 400         9.5   */
-       { 0xB, 0x70, 0x0018 },  /* 600         0.0   */
-       { 0xB, 0x70, 0x3015 },  /* 600         3.5   */
-       { 0xB, 0x70, 0x6012 },  /* 600         6.0   */
-       { 0x5, 0x00, 0x0018 },  /* 800         0.0   */
-       { 0x5, 0x00, 0x3015 },  /* 800         3.5   */
-       { 0x6, 0x98, 0x0018 },  /* 1200        0.0   */
-};
-
-/* FIXME - After table is updated in Bspec */
-/* Voltage Swing Programming for VccIO 0.85V for eDP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_85V[] = {
-                               /* Voltage mV  db    */
-       { 0x0, 0x00, 0x00 },    /* 200         0.0   */
-       { 0x0, 0x00, 0x00 },    /* 200         1.5   */
-       { 0x0, 0x00, 0x00 },    /* 200         4.0   */
-       { 0x0, 0x00, 0x00 },    /* 200         6.0   */
-       { 0x0, 0x00, 0x00 },    /* 250         0.0   */
-       { 0x0, 0x00, 0x00 },    /* 250         1.5   */
-       { 0x0, 0x00, 0x00 },    /* 250         4.0   */
-       { 0x0, 0x00, 0x00 },    /* 300         0.0   */
-       { 0x0, 0x00, 0x00 },    /* 300         1.5   */
-       { 0x0, 0x00, 0x00 },    /* 350         0.0   */
-};
-
-/* Voltage Swing Programming for VccIO 0.95V for DP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_95V[] = {
-                               /* Voltage mV  db    */
-       { 0x2, 0x98, 0x0018 },  /* 400         0.0   */
-       { 0x2, 0x98, 0x3015 },  /* 400         3.5   */
-       { 0x2, 0x98, 0x6012 },  /* 400         6.0   */
-       { 0x2, 0x98, 0x900F },  /* 400         9.5   */
-       { 0x4, 0x98, 0x0018 },  /* 600         0.0   */
-       { 0x4, 0x98, 0x3015 },  /* 600         3.5   */
-       { 0x4, 0x98, 0x6012 },  /* 600         6.0   */
-       { 0x5, 0x76, 0x0018 },  /* 800         0.0   */
-       { 0x5, 0x76, 0x3015 },  /* 800         3.5   */
-       { 0x6, 0x98, 0x0018 },  /* 1200        0.0   */
+/* icl_combo_phy_ddi_translations */
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2[] = {
+                                               /* NT mV Trans mV db    */
+       { 0xA, 0x35, 0x3F, 0x00, 0x00 },        /* 350   350      0.0   */
+       { 0xA, 0x4F, 0x37, 0x00, 0x08 },        /* 350   500      3.1   */
+       { 0xC, 0x71, 0x2F, 0x00, 0x10 },        /* 350   700      6.0   */
+       { 0x6, 0x7F, 0x2B, 0x00, 0x14 },        /* 350   900      8.2   */
+       { 0xA, 0x4C, 0x3F, 0x00, 0x00 },        /* 500   500      0.0   */
+       { 0xC, 0x73, 0x34, 0x00, 0x0B },        /* 500   700      2.9   */
+       { 0x6, 0x7F, 0x2F, 0x00, 0x10 },        /* 500   900      5.1   */
+       { 0xC, 0x6C, 0x3C, 0x00, 0x03 },        /* 650   700      0.6   */
+       { 0x6, 0x7F, 0x35, 0x00, 0x0A },        /* 600   900      3.5   */
+       { 0x6, 0x7F, 0x3F, 0x00, 0x00 },        /* 900   900      0.0   */
 };
 
-/* FIXME - After table is updated in Bspec */
-/* Voltage Swing Programming for VccIO 0.95V for eDP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_95V[] = {
-                               /* Voltage mV  db    */
-       { 0x0, 0x00, 0x00 },    /* 200         0.0   */
-       { 0x0, 0x00, 0x00 },    /* 200         1.5   */
-       { 0x0, 0x00, 0x00 },    /* 200         4.0   */
-       { 0x0, 0x00, 0x00 },    /* 200         6.0   */
-       { 0x0, 0x00, 0x00 },    /* 250         0.0   */
-       { 0x0, 0x00, 0x00 },    /* 250         1.5   */
-       { 0x0, 0x00, 0x00 },    /* 250         4.0   */
-       { 0x0, 0x00, 0x00 },    /* 300         0.0   */
-       { 0x0, 0x00, 0x00 },    /* 300         1.5   */
-       { 0x0, 0x00, 0x00 },    /* 350         0.0   */
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2[] = {
+                                               /* NT mV Trans mV db    */
+       { 0x0, 0x7F, 0x3F, 0x00, 0x00 },        /* 200   200      0.0   */
+       { 0x8, 0x7F, 0x38, 0x00, 0x07 },        /* 200   250      1.9   */
+       { 0x1, 0x7F, 0x33, 0x00, 0x0C },        /* 200   300      3.5   */
+       { 0x9, 0x7F, 0x31, 0x00, 0x0E },        /* 200   350      4.9   */
+       { 0x8, 0x7F, 0x3F, 0x00, 0x00 },        /* 250   250      0.0   */
+       { 0x1, 0x7F, 0x38, 0x00, 0x07 },        /* 250   300      1.6   */
+       { 0x9, 0x7F, 0x35, 0x00, 0x0A },        /* 250   350      2.9   */
+       { 0x1, 0x7F, 0x3F, 0x00, 0x00 },        /* 300   300      0.0   */
+       { 0x9, 0x7F, 0x38, 0x00, 0x07 },        /* 300   350      1.3   */
+       { 0x9, 0x7F, 0x3F, 0x00, 0x00 },        /* 350   350      0.0   */
 };
 
-/* Voltage Swing Programming for VccIO 1.05V for DP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_1_05V[] = {
-                               /* Voltage mV  db    */
-       { 0x2, 0x98, 0x0018 },  /* 400         0.0   */
-       { 0x2, 0x98, 0x3015 },  /* 400         3.5   */
-       { 0x2, 0x98, 0x6012 },  /* 400         6.0   */
-       { 0x2, 0x98, 0x900F },  /* 400         9.5   */
-       { 0x4, 0x98, 0x0018 },  /* 600         0.0   */
-       { 0x4, 0x98, 0x3015 },  /* 600         3.5   */
-       { 0x4, 0x98, 0x6012 },  /* 600         6.0   */
-       { 0x5, 0x71, 0x0018 },  /* 800         0.0   */
-       { 0x5, 0x71, 0x3015 },  /* 800         3.5   */
-       { 0x6, 0x98, 0x0018 },  /* 1200        0.0   */
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr3[] = {
+                                               /* NT mV Trans mV db    */
+       { 0xA, 0x35, 0x3F, 0x00, 0x00 },        /* 350   350      0.0   */
+       { 0xA, 0x4F, 0x37, 0x00, 0x08 },        /* 350   500      3.1   */
+       { 0xC, 0x71, 0x2F, 0x00, 0x10 },        /* 350   700      6.0   */
+       { 0x6, 0x7F, 0x2B, 0x00, 0x14 },        /* 350   900      8.2   */
+       { 0xA, 0x4C, 0x3F, 0x00, 0x00 },        /* 500   500      0.0   */
+       { 0xC, 0x73, 0x34, 0x00, 0x0B },        /* 500   700      2.9   */
+       { 0x6, 0x7F, 0x2F, 0x00, 0x10 },        /* 500   900      5.1   */
+       { 0xC, 0x6C, 0x3C, 0x00, 0x03 },        /* 650   700      0.6   */
+       { 0x6, 0x7F, 0x35, 0x00, 0x0A },        /* 600   900      3.5   */
+       { 0x6, 0x7F, 0x3F, 0x00, 0x00 },        /* 900   900      0.0   */
 };
 
-/* FIXME - After table is updated in Bspec */
-/* Voltage Swing Programming for VccIO 1.05V for eDP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_1_05V[] = {
-                               /* Voltage mV  db    */
-       { 0x0, 0x00, 0x00 },    /* 200         0.0   */
-       { 0x0, 0x00, 0x00 },    /* 200         1.5   */
-       { 0x0, 0x00, 0x00 },    /* 200         4.0   */
-       { 0x0, 0x00, 0x00 },    /* 200         6.0   */
-       { 0x0, 0x00, 0x00 },    /* 250         0.0   */
-       { 0x0, 0x00, 0x00 },    /* 250         1.5   */
-       { 0x0, 0x00, 0x00 },    /* 250         4.0   */
-       { 0x0, 0x00, 0x00 },    /* 300         0.0   */
-       { 0x0, 0x00, 0x00 },    /* 300         1.5   */
-       { 0x0, 0x00, 0x00 },    /* 350         0.0   */
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = {
+                                               /* NT mV Trans mV db    */
+       { 0xA, 0x60, 0x3F, 0x00, 0x00 },        /* 450   450      0.0   */
+       { 0xB, 0x73, 0x36, 0x00, 0x09 },        /* 450   650      3.2   */
+       { 0x6, 0x7F, 0x31, 0x00, 0x0E },        /* 450   850      5.5   */
+       { 0xB, 0x73, 0x3F, 0x00, 0x00 },        /* 650   650      0.0   ALS */
+       { 0x6, 0x7F, 0x37, 0x00, 0x08 },        /* 650   850      2.3   */
+       { 0x6, 0x7F, 0x3F, 0x00, 0x00 },        /* 850   850      0.0   */
+       { 0x6, 0x7F, 0x35, 0x00, 0x0A },        /* 600   850      3.0   */
 };
 
 struct icl_mg_phy_ddi_buf_trans {
@@ -871,43 +826,23 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
        }
 }
 
-static const struct icl_combo_phy_ddi_buf_trans *
+static const struct cnl_ddi_buf_trans *
 icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port,
-                       int type, int *n_entries)
+                       int type, int rate, int *n_entries)
 {
-       u32 voltage = I915_READ(ICL_PORT_COMP_DW3(port)) & VOLTAGE_INFO_MASK;
-
-       if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
-               switch (voltage) {
-               case VOLTAGE_INFO_0_85V:
-                       *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_85V);
-                       return icl_combo_phy_ddi_translations_edp_0_85V;
-               case VOLTAGE_INFO_0_95V:
-                       *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_95V);
-                       return icl_combo_phy_ddi_translations_edp_0_95V;
-               case VOLTAGE_INFO_1_05V:
-                       *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_1_05V);
-                       return icl_combo_phy_ddi_translations_edp_1_05V;
-               default:
-                       MISSING_CASE(voltage);
-                       return NULL;
-               }
-       } else {
-               switch (voltage) {
-               case VOLTAGE_INFO_0_85V:
-                       *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_85V);
-                       return icl_combo_phy_ddi_translations_dp_hdmi_0_85V;
-               case VOLTAGE_INFO_0_95V:
-                       *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_95V);
-                       return icl_combo_phy_ddi_translations_dp_hdmi_0_95V;
-               case VOLTAGE_INFO_1_05V:
-                       *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_1_05V);
-                       return icl_combo_phy_ddi_translations_dp_hdmi_1_05V;
-               default:
-                       MISSING_CASE(voltage);
-                       return NULL;
-               }
+       if (type == INTEL_OUTPUT_HDMI) {
+               *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
+               return icl_combo_phy_ddi_translations_hdmi;
+       } else if (rate > 540000 && type == INTEL_OUTPUT_EDP) {
+               *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3);
+               return icl_combo_phy_ddi_translations_edp_hbr3;
+       } else if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
+               *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
+               return icl_combo_phy_ddi_translations_edp_hbr2;
        }
+
+       *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2);
+       return icl_combo_phy_ddi_translations_dp_hbr2;
 }
 
 static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
@@ -918,8 +853,8 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
 
        if (IS_ICELAKE(dev_priv)) {
                if (intel_port_is_combophy(dev_priv, port))
-                       icl_get_combo_buf_trans(dev_priv, port,
-                                               INTEL_OUTPUT_HDMI, &n_entries);
+                       icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_HDMI,
+                                               0, &n_entries);
                else
                        n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
                default_entry = n_entries - 1;
@@ -1039,7 +974,7 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
        DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
 }
 
-static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
+static u32 hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
 {
        switch (pll->info->id) {
        case DPLL_ID_WRPLL1:
@@ -1060,8 +995,8 @@ static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
        }
 }
 
-static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
-                                      const struct intel_crtc_state *crtc_state)
+static u32 icl_pll_to_ddi_clk_sel(struct intel_encoder *encoder,
+                                 const struct intel_crtc_state *crtc_state)
 {
        const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
        int clock = crtc_state->port_clock;
@@ -1069,10 +1004,11 @@ static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
 
        switch (id) {
        default:
+               /*
+                * DPLL_ID_ICL_DPLL0 and DPLL_ID_ICL_DPLL1 should not be used
+                * here, so do warn if one gets passed in.
+                */
                MISSING_CASE(id);
-               /* fall through */
-       case DPLL_ID_ICL_DPLL0:
-       case DPLL_ID_ICL_DPLL1:
                return DDI_CLK_SEL_NONE;
        case DPLL_ID_ICL_TBTPLL:
                switch (clock) {
@@ -1086,7 +1022,7 @@ static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
                        return DDI_CLK_SEL_TBT_810;
                default:
                        MISSING_CASE(clock);
-                       break;
+                       return DDI_CLK_SEL_NONE;
                }
        case DPLL_ID_ICL_MGPLL1:
        case DPLL_ID_ICL_MGPLL2:
@@ -1308,8 +1244,8 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
                               enum intel_dpll_id pll_id)
 {
        i915_reg_t cfgcr1_reg, cfgcr2_reg;
-       uint32_t cfgcr1_val, cfgcr2_val;
-       uint32_t p0, p1, p2, dco_freq;
+       u32 cfgcr1_val, cfgcr2_val;
+       u32 p0, p1, p2, dco_freq;
 
        cfgcr1_reg = DPLL_CFGCR1(pll_id);
        cfgcr2_reg = DPLL_CFGCR2(pll_id);
@@ -1361,14 +1297,17 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
        dco_freq += (((cfgcr1_val & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) * 24 *
                1000) / 0x8000;
 
+       if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
+               return 0;
+
        return dco_freq / (p0 * p1 * p2 * 5);
 }
 
 int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
                        enum intel_dpll_id pll_id)
 {
-       uint32_t cfgcr0, cfgcr1;
-       uint32_t p0, p1, p2, dco_freq, ref_clock;
+       u32 cfgcr0, cfgcr1;
+       u32 p0, p1, p2, dco_freq, ref_clock;
 
        if (INTEL_GEN(dev_priv) >= 11) {
                cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id));
@@ -1453,16 +1392,17 @@ static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
 static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv,
                                enum port port)
 {
+       enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
        u32 mg_pll_div0, mg_clktop_hsclkctl;
        u32 m1, m2_int, m2_frac, div1, div2, refclk;
        u64 tmp;
 
        refclk = dev_priv->cdclk.hw.ref;
 
-       mg_pll_div0 = I915_READ(MG_PLL_DIV0(port));
-       mg_clktop_hsclkctl = I915_READ(MG_CLKTOP2_HSCLKCTL(port));
+       mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
+       mg_clktop_hsclkctl = I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
 
-       m1 = I915_READ(MG_PLL_DIV1(port)) & MG_PLL_DIV1_FBPREDIV_MASK;
+       m1 = I915_READ(MG_PLL_DIV1(tc_port)) & MG_PLL_DIV1_FBPREDIV_MASK;
        m2_int = mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
        m2_frac = (mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) ?
                  (mg_pll_div0 & MG_PLL_DIV0_FBDIV_FRAC_MASK) >>
@@ -1533,7 +1473,7 @@ static void icl_ddi_clock_get(struct intel_encoder *encoder,
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum port port = encoder->port;
        int link_clock = 0;
-       uint32_t pll_id;
+       u32 pll_id;
 
        pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
        if (intel_port_is_combophy(dev_priv, port)) {
@@ -1558,7 +1498,7 @@ static void cnl_ddi_clock_get(struct intel_encoder *encoder,
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        int link_clock = 0;
-       uint32_t cfgcr0;
+       u32 cfgcr0;
        enum intel_dpll_id pll_id;
 
        pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
@@ -1612,7 +1552,7 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        int link_clock = 0;
-       uint32_t dpll_ctl1;
+       u32 dpll_ctl1;
        enum intel_dpll_id pll_id;
 
        pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
@@ -1801,7 +1741,7 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
-       uint32_t temp;
+       u32 temp;
 
        temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
        if (state == true)
@@ -1819,7 +1759,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
        enum pipe pipe = crtc->pipe;
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
        enum port port = encoder->port;
-       uint32_t temp;
+       u32 temp;
 
        /* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
        temp = TRANS_DDI_FUNC_ENABLE;
@@ -1880,7 +1820,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
                        temp |= TRANS_DDI_MODE_SELECT_DVI;
 
                if (crtc_state->hdmi_scrambling)
-                       temp |= TRANS_DDI_HDMI_SCRAMBLING_MASK;
+                       temp |= TRANS_DDI_HDMI_SCRAMBLING;
                if (crtc_state->hdmi_high_tmds_clock_ratio)
                        temp |= TRANS_DDI_HIGH_TMDS_CHAR_RATE;
        } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
@@ -1903,7 +1843,7 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
        i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
-       uint32_t val = I915_READ(reg);
+       u32 val = I915_READ(reg);
 
        val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
        val |= TRANS_DDI_PORT_NONE;
@@ -1922,12 +1862,14 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
 {
        struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
+       intel_wakeref_t wakeref;
        enum pipe pipe = 0;
        int ret = 0;
-       uint32_t tmp;
+       u32 tmp;
 
-       if (WARN_ON(!intel_display_power_get_if_enabled(dev_priv,
-                                               intel_encoder->power_domain)))
+       wakeref = intel_display_power_get_if_enabled(dev_priv,
+                                                    intel_encoder->power_domain);
+       if (WARN_ON(!wakeref))
                return -ENXIO;
 
        if (WARN_ON(!intel_encoder->get_hw_state(intel_encoder, &pipe))) {
@@ -1942,7 +1884,7 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
                tmp &= ~TRANS_DDI_HDCP_SIGNALLING;
        I915_WRITE(TRANS_DDI_FUNC_CTL(pipe), tmp);
 out:
-       intel_display_power_put(dev_priv, intel_encoder->power_domain);
+       intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
        return ret;
 }
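
This hunk is one instance of a series-wide conversion:
intel_display_power_get_if_enabled() now returns an intel_wakeref_t cookie
(zero on failure) and intel_display_power_put() takes the same cookie
back, so an unbalanced reference can be traced to its taker; call sites
that do not keep a cookie switch to intel_display_power_put_unchecked(),
as in the post-disable hunks further down. A toy, self-contained model of
the calling convention; the cookie bookkeeping here is an assumption for
illustration, not the driver's implementation:

#include <stdio.h>

/* Toy wakeref convention: "get" returns an opaque non-zero cookie on
 * success, 0 if the power domain is off; the caller must hand the same
 * cookie to "put". */
typedef unsigned long wakeref_t;

static unsigned long cookie_src = 1;

static wakeref_t power_get_if_enabled(int domain_is_on)
{
        return domain_is_on ? cookie_src++ : 0;
}

static void power_put(wakeref_t wakeref)
{
        printf("dropped wakeref %lu\n", wakeref);
}

static int toggle_signalling(int domain_is_on)
{
        wakeref_t wakeref = power_get_if_enabled(domain_is_on);

        if (!wakeref)
                return -1;      /* mirrors the -ENXIO path above */

        /* ... registers behind the power well are safe to touch ... */

        power_put(wakeref);
        return 0;
}

int main(void)
{
        return toggle_signalling(1);
}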
 
@@ -1953,13 +1895,15 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
        struct intel_encoder *encoder = intel_connector->encoder;
        int type = intel_connector->base.connector_type;
        enum port port = encoder->port;
-       enum pipe pipe = 0;
        enum transcoder cpu_transcoder;
-       uint32_t tmp;
+       intel_wakeref_t wakeref;
+       enum pipe pipe = 0;
+       u32 tmp;
        bool ret;
 
-       if (!intel_display_power_get_if_enabled(dev_priv,
-                                               encoder->power_domain))
+       wakeref = intel_display_power_get_if_enabled(dev_priv,
+                                                    encoder->power_domain);
+       if (!wakeref)
                return false;
 
        if (!encoder->get_hw_state(encoder, &pipe)) {
@@ -2001,7 +1945,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
        }
 
 out:
-       intel_display_power_put(dev_priv, encoder->power_domain);
+       intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
 
        return ret;
 }
@@ -2012,6 +1956,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = encoder->port;
+       intel_wakeref_t wakeref;
        enum pipe p;
        u32 tmp;
        u8 mst_pipe_mask;
@@ -2019,8 +1964,9 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
        *pipe_mask = 0;
        *is_dp_mst = false;
 
-       if (!intel_display_power_get_if_enabled(dev_priv,
-                                               encoder->power_domain))
+       wakeref = intel_display_power_get_if_enabled(dev_priv,
+                                                    encoder->power_domain);
+       if (!wakeref)
                return;
 
        tmp = I915_READ(DDI_BUF_CTL(port));
@@ -2091,7 +2037,7 @@ out:
                                  "(PHY_CTL %08x)\n", port_name(port), tmp);
        }
 
-       intel_display_power_put(dev_priv, encoder->power_domain);
+       intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
 }
 
 bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
@@ -2188,7 +2134,7 @@ void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state)
 }
 
 static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
-                               enum port port, uint8_t iboost)
+                               enum port port, u8 iboost)
 {
        u32 tmp;
 
@@ -2207,7 +2153,7 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
        struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum port port = encoder->port;
-       uint8_t iboost;
+       u8 iboost;
 
        if (type == INTEL_OUTPUT_HDMI)
                iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level;
@@ -2275,13 +2221,14 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
 u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = encoder->port;
        int n_entries;
 
        if (IS_ICELAKE(dev_priv)) {
                if (intel_port_is_combophy(dev_priv, port))
                        icl_get_combo_buf_trans(dev_priv, port, encoder->type,
-                                               &n_entries);
+                                               intel_dp->link_rate, &n_entries);
                else
                        n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
        } else if (IS_CANNONLAKE(dev_priv)) {
@@ -2462,14 +2409,15 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
 }
 
 static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
-                                        u32 level, enum port port, int type)
+                                       u32 level, enum port port, int type,
+                                       int rate)
 {
-       const struct icl_combo_phy_ddi_buf_trans *ddi_translations = NULL;
+       const struct cnl_ddi_buf_trans *ddi_translations = NULL;
        u32 n_entries, val;
        int ln;
 
        ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type,
-                                                  &n_entries);
+                                                  rate, &n_entries);
        if (!ddi_translations)
                return;
 
@@ -2478,34 +2426,23 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
                level = n_entries - 1;
        }
 
-       /* Set PORT_TX_DW5 Rterm Sel to 110b. */
+       /* Set PORT_TX_DW5 */
        val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
-       val &= ~RTERM_SELECT_MASK;
+       val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK |
+                 TAP2_DISABLE | TAP3_DISABLE);
+       val |= SCALING_MODE_SEL(0x2);
        val |= RTERM_SELECT(0x6);
-       I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
-
-       /* Program PORT_TX_DW5 */
-       val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
-       /* Set DisableTap2 and DisableTap3 if MIPI DSI
-        * Clear DisableTap2 and DisableTap3 for all other Ports
-        */
-       if (type == INTEL_OUTPUT_DSI) {
-               val |= TAP2_DISABLE;
-               val |= TAP3_DISABLE;
-       } else {
-               val &= ~TAP2_DISABLE;
-               val &= ~TAP3_DISABLE;
-       }
+       val |= TAP3_DISABLE;
        I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
 
        /* Program PORT_TX_DW2 */
        val = I915_READ(ICL_PORT_TX_DW2_LN0(port));
        val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
                 RCOMP_SCALAR_MASK);
-       val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_select);
-       val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_select);
+       val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel);
+       val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel);
        /* Program Rcomp scalar for every table entry */
-       val |= RCOMP_SCALAR(ddi_translations[level].dw2_swing_scalar);
+       val |= RCOMP_SCALAR(0x98);
        I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val);
 
        /* Program PORT_TX_DW4 */
@@ -2514,9 +2451,17 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
                val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln));
                val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
                         CURSOR_COEFF_MASK);
-               val |= ddi_translations[level].dw4_scaling;
+               val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
+               val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
+               val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
                I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val);
        }
+
+       /* Program PORT_TX_DW7 */
+       val = I915_READ(ICL_PORT_TX_DW7_LN0(port));
+       val &= ~N_SCALAR_MASK;
+       val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
+       I915_WRITE(ICL_PORT_TX_DW7_GRP(port), val);
 }
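
icl_ddi_combo_vswing_program() now draws from cnl-style translation
entries selected by port, output type, and link rate, and consumes
per-level dw2/dw4/dw7 fields; Rterm select and the Rcomp scalar become
fixed values (0x6 and 0x98) rather than table entries. Judging only from
the fields referenced above, one table entry plausibly looks like the
sketch below; this is a guess at struct cnl_ddi_buf_trans, and the real
layout may differ:

#include <stdint.h>
#include <stdio.h>

/* Inferred shape of one voltage-swing/pre-emphasis translation entry,
 * one per signal level; field names follow the accesses in the hunk. */
struct buf_trans_entry {
        uint8_t dw2_swing_sel;          /* feeds SWING_SEL_UPPER/LOWER() */
        uint8_t dw7_n_scalar;           /* feeds N_SCALAR() */
        uint8_t dw4_cursor_coeff;       /* feeds CURSOR_COEFF() */
        uint8_t dw4_post_cursor_2;      /* feeds POST_CURSOR_2() */
        uint8_t dw4_post_cursor_1;      /* feeds POST_CURSOR_1() */
};

/* hypothetical level-0 entry, values invented for illustration */
static const struct buf_trans_entry level0 = {
        .dw2_swing_sel = 0xA,
        .dw7_n_scalar = 0x98,
        .dw4_cursor_coeff = 0x3F,
};

int main(void)
{
        printf("n_scalar for level 0: 0x%x\n", level0.dw7_n_scalar);
        return 0;
}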
 
 static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
@@ -2581,7 +2526,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
        I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
 
        /* 5. Program swing and de-emphasis */
-       icl_ddi_combo_vswing_program(dev_priv, level, port, type);
+       icl_ddi_combo_vswing_program(dev_priv, level, port, type, rate);
 
        /* 6. Set training enable to trigger update */
        val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
@@ -2722,7 +2667,7 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder,
                icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level);
 }
 
-static uint32_t translate_signal_level(int signal_levels)
+static u32 translate_signal_level(int signal_levels)
 {
        int i;
 
@@ -2737,9 +2682,9 @@ static uint32_t translate_signal_level(int signal_levels)
        return 0;
 }
 
-static uint32_t intel_ddi_dp_level(struct intel_dp *intel_dp)
+static u32 intel_ddi_dp_level(struct intel_dp *intel_dp)
 {
-       uint8_t train_set = intel_dp->train_set[0];
+       u8 train_set = intel_dp->train_set[0];
        int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
                                         DP_TRAIN_PRE_EMPHASIS_MASK);
 
@@ -2764,7 +2709,7 @@ u32 bxt_signal_levels(struct intel_dp *intel_dp)
        return 0;
 }
 
-uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
+u32 ddi_signal_levels(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
@@ -2778,8 +2723,8 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
 }
 
 static inline
-uint32_t icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
-                                  enum port port)
+u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
+                             enum port port)
 {
        if (intel_port_is_combophy(dev_priv, port)) {
                return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(port);
@@ -2914,7 +2859,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum port port = encoder->port;
-       uint32_t val;
+       u32 val;
        const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
 
        if (WARN_ON(!pll))
@@ -2925,7 +2870,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
        if (IS_ICELAKE(dev_priv)) {
                if (!intel_port_is_combophy(dev_priv, port))
                        I915_WRITE(DDI_CLK_SEL(port),
-                                  icl_pll_to_ddi_pll_sel(encoder, crtc_state));
+                                  icl_pll_to_ddi_clk_sel(encoder, crtc_state));
        } else if (IS_CANNONLAKE(dev_priv)) {
                /* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
                val = I915_READ(DPCLKA_CFGCR0);
@@ -3349,7 +3294,8 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
        intel_edp_panel_vdd_on(intel_dp);
        intel_edp_panel_off(intel_dp);
 
-       intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
+       intel_display_power_put_unchecked(dev_priv,
+                                         dig_port->ddi_io_power_domain);
 
        intel_ddi_clk_disable(encoder);
 }
@@ -3369,7 +3315,8 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
 
        intel_disable_ddi_buf(encoder, old_crtc_state);
 
-       intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
+       intel_display_power_put_unchecked(dev_priv,
+                                         dig_port->ddi_io_power_domain);
 
        intel_ddi_clk_disable(encoder);
 
@@ -3411,7 +3358,7 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
                                const struct drm_connector_state *old_conn_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       uint32_t val;
+       u32 val;
 
        /*
         * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
@@ -3603,6 +3550,26 @@ static void intel_disable_ddi(struct intel_encoder *encoder,
                intel_disable_ddi_dp(encoder, old_crtc_state, old_conn_state);
 }
 
+static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder,
+                                    const struct intel_crtc_state *crtc_state,
+                                    const struct drm_connector_state *conn_state)
+{
+       struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+       intel_psr_enable(intel_dp, crtc_state);
+       intel_edp_drrs_enable(intel_dp, crtc_state);
+
+       intel_panel_update_backlight(encoder, crtc_state, conn_state);
+}
+
+static void intel_ddi_update_pipe(struct intel_encoder *encoder,
+                                 const struct intel_crtc_state *crtc_state,
+                                 const struct drm_connector_state *conn_state)
+{
+       if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+               intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state);
+}
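
The new update_pipe hook gives DDI encoders a path that runs while the
pipe stays up, refreshing PSR, DRRS, and backlight on updates that do not
need a full modeset; HDMI is excluded since only the DP/eDP paths carry
that state. A toy dispatch showing how such an optional hook is typically
wired (illustrative, not the actual atomic-commit call site):

#include <stdio.h>

/* Optional-hook dispatch: encoders that need work on a no-modeset
 * update provide update_pipe; others leave it NULL. */
struct toy_encoder {
        const char *name;
        void (*update_pipe)(struct toy_encoder *enc);
};

static void ddi_update_pipe(struct toy_encoder *enc)
{
        printf("%s: refresh PSR/DRRS/backlight\n", enc->name);
}

static void commit_pipe(struct toy_encoder *enc, int needs_modeset)
{
        if (!needs_modeset && enc->update_pipe)
                enc->update_pipe(enc);
}

int main(void)
{
        struct toy_encoder ddi = { "DDI-A", ddi_update_pipe };

        commit_pipe(&ddi, 0);
        return 0;
}
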
+
 static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder,
                                         const struct intel_crtc_state *pipe_config,
                                         enum port port)
@@ -3671,8 +3638,8 @@ intel_ddi_post_pll_disable(struct intel_encoder *encoder,
 
        if (intel_crtc_has_dp_encoder(crtc_state) ||
            intel_port_is_tc(dev_priv, encoder->port))
-               intel_display_power_put(dev_priv,
-                                       intel_ddi_main_link_aux_domain(dig_port));
+               intel_display_power_put_unchecked(dev_priv,
+                                                 intel_ddi_main_link_aux_domain(dig_port));
 }
 
 void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
@@ -3681,7 +3648,7 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
        struct drm_i915_private *dev_priv =
                to_i915(intel_dig_port->base.base.dev);
        enum port port = intel_dig_port->base.port;
-       uint32_t val;
+       u32 val;
        bool wait = false;
 
        if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
@@ -3793,8 +3760,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
                if (intel_dig_port->infoframe_enabled(encoder, pipe_config))
                        pipe_config->has_infoframe = true;
 
-               if ((temp & TRANS_DDI_HDMI_SCRAMBLING_MASK) ==
-                       TRANS_DDI_HDMI_SCRAMBLING_MASK)
+               if (temp & TRANS_DDI_HDMI_SCRAMBLING)
                        pipe_config->hdmi_scrambling = true;
                if (temp & TRANS_DDI_HIGH_TMDS_CHAR_RATE)
                        pipe_config->hdmi_high_tmds_clock_ratio = true;
@@ -3875,9 +3841,9 @@ intel_ddi_compute_output_type(struct intel_encoder *encoder,
        }
 }
 
-static bool intel_ddi_compute_config(struct intel_encoder *encoder,
-                                    struct intel_crtc_state *pipe_config,
-                                    struct drm_connector_state *conn_state)
+static int intel_ddi_compute_config(struct intel_encoder *encoder,
+                                   struct intel_crtc_state *pipe_config,
+                                   struct drm_connector_state *conn_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum port port = encoder->port;
@@ -3901,9 +3867,50 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
 
 }
 
+static void intel_ddi_encoder_suspend(struct intel_encoder *encoder)
+{
+       struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+       struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+       intel_dp_encoder_suspend(encoder);
+
+       /*
+        * TODO: disconnect also from USB DP alternate mode once we have a
+        * way to handle the modeset restore in that mode during resume
+        * even if the sink has disappeared while being suspended.
+        */
+       if (dig_port->tc_legacy_port)
+               icl_tc_phy_disconnect(i915, dig_port);
+}
+
+static void intel_ddi_encoder_reset(struct drm_encoder *drm_encoder)
+{
+       struct intel_digital_port *dig_port = enc_to_dig_port(drm_encoder);
+       struct drm_i915_private *i915 = to_i915(drm_encoder->dev);
+
+       if (intel_port_is_tc(i915, dig_port->base.port))
+               intel_digital_port_connected(&dig_port->base);
+
+       intel_dp_encoder_reset(drm_encoder);
+}
+
+static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
+{
+       struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+       struct drm_i915_private *i915 = to_i915(encoder->dev);
+
+       intel_dp_encoder_flush_work(encoder);
+
+       if (intel_port_is_tc(i915, dig_port->base.port))
+               icl_tc_phy_disconnect(i915, dig_port);
+
+       drm_encoder_cleanup(encoder);
+       kfree(dig_port);
+}
+
 static const struct drm_encoder_funcs intel_ddi_funcs = {
-       .reset = intel_dp_encoder_reset,
-       .destroy = intel_dp_encoder_destroy,
+       .reset = intel_ddi_encoder_reset,
+       .destroy = intel_ddi_encoder_destroy,
 };
 
 static struct intel_connector *
@@ -4147,16 +4154,16 @@ intel_ddi_max_lanes(struct intel_digital_port *intel_dport)
 
 void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 {
+       struct ddi_vbt_port_info *port_info =
+               &dev_priv->vbt.ddi_port_info[port];
        struct intel_digital_port *intel_dig_port;
        struct intel_encoder *intel_encoder;
        struct drm_encoder *encoder;
        bool init_hdmi, init_dp, init_lspcon = false;
        enum pipe pipe;
 
-
-       init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
-                    dev_priv->vbt.ddi_port_info[port].supports_hdmi);
-       init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
+       init_hdmi = port_info->supports_dvi || port_info->supports_hdmi;
+       init_dp = port_info->supports_dp;
 
        if (intel_bios_is_lspcon_present(dev_priv, port)) {
                /*
@@ -4195,9 +4202,10 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
        intel_encoder->pre_enable = intel_ddi_pre_enable;
        intel_encoder->disable = intel_disable_ddi;
        intel_encoder->post_disable = intel_ddi_post_disable;
+       intel_encoder->update_pipe = intel_ddi_update_pipe;
        intel_encoder->get_hw_state = intel_ddi_get_hw_state;
        intel_encoder->get_config = intel_ddi_get_config;
-       intel_encoder->suspend = intel_dp_encoder_suspend;
+       intel_encoder->suspend = intel_ddi_encoder_suspend;
        intel_encoder->get_power_domains = intel_ddi_get_power_domains;
        intel_encoder->type = INTEL_OUTPUT_DDI;
        intel_encoder->power_domain = intel_port_to_power_domain(port);
@@ -4216,6 +4224,10 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
        intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port);
        intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
 
+       intel_dig_port->tc_legacy_port = intel_port_is_tc(dev_priv, port) &&
+                                        !port_info->supports_typec_usb &&
+                                        !port_info->supports_tbt;
+
        switch (port) {
        case PORT_A:
                intel_dig_port->ddi_io_power_domain =
@@ -4274,6 +4286,10 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
        }
 
        intel_infoframe_init(intel_dig_port);
+
+       if (intel_port_is_tc(dev_priv, port))
+               intel_digital_port_connected(intel_encoder);
+
        return;
 
 err:
index 1e56319334f38ed248a2d12d93ddc3b97937d6c8..855a5074ad775c2314dbf1e645008ee8709d9bb9 100644
@@ -104,7 +104,7 @@ static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
        drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
 }
 
-void intel_device_info_dump_runtime(const struct intel_device_info *info,
+void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
                                    struct drm_printer *p)
 {
        sseu_dump(&info->sseu, p);
@@ -113,21 +113,6 @@ void intel_device_info_dump_runtime(const struct intel_device_info *info,
                   info->cs_timestamp_frequency_khz);
 }
 
-void intel_device_info_dump(const struct intel_device_info *info,
-                           struct drm_printer *p)
-{
-       struct drm_i915_private *dev_priv =
-               container_of(info, struct drm_i915_private, info);
-
-       drm_printf(p, "pciid=0x%04x rev=0x%02x platform=%s gen=%i\n",
-                  INTEL_DEVID(dev_priv),
-                  INTEL_REVID(dev_priv),
-                  intel_platform_name(info->platform),
-                  info->gen);
-
-       intel_device_info_dump_flags(info, p);
-}
-
 void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
                                     struct drm_printer *p)
 {
@@ -164,7 +149,7 @@ static u16 compute_eu_total(const struct sseu_dev_info *sseu)
 
 static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
 {
-       struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
+       struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
        u8 s_en;
        u32 ss_en, ss_en_mask;
        u8 eu_en;
@@ -203,7 +188,7 @@ static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
 
 static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
 {
-       struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
+       struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
        const u32 fuse2 = I915_READ(GEN8_FUSE2);
        int s, ss;
        const int eu_mask = 0xff;
@@ -280,7 +265,7 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
 
 static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
 {
-       struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
+       struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
        u32 fuse;
 
        fuse = I915_READ(CHV_FUSE_GT);
@@ -334,7 +319,7 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
 static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
 {
        struct intel_device_info *info = mkwrite_device_info(dev_priv);
-       struct sseu_dev_info *sseu = &info->sseu;
+       struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
        int s, ss;
        u32 fuse2, eu_disable, subslice_mask;
        const u8 eu_mask = 0xff;
@@ -437,7 +422,7 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
 
 static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
 {
-       struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
+       struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
        int s, ss;
        u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */
 
@@ -519,8 +504,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
 
 static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
 {
-       struct intel_device_info *info = mkwrite_device_info(dev_priv);
-       struct sseu_dev_info *sseu = &info->sseu;
+       struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
        u32 fuse1;
        int s, ss;
 
@@ -528,9 +512,9 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
         * There isn't a register to tell us how many slices/subslices. We
         * work off the PCI-ids here.
         */
-       switch (info->gt) {
+       switch (INTEL_INFO(dev_priv)->gt) {
        default:
-               MISSING_CASE(info->gt);
+               MISSING_CASE(INTEL_INFO(dev_priv)->gt);
                /* fall through */
        case 1:
                sseu->slice_mask = BIT(0);
@@ -725,7 +709,7 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
 
 /**
  * intel_device_info_runtime_init - initialize runtime info
- * @info: intel device info struct
+ * @dev_priv: the i915 device
  *
  * Determine various intel_device_info fields at runtime.
  *
@@ -739,29 +723,29 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
  *   - after the PCH has been detected,
  *   - before the first usage of the fields it can tweak.
  */
-void intel_device_info_runtime_init(struct intel_device_info *info)
+void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv =
-               container_of(info, struct drm_i915_private, info);
+       struct intel_device_info *info = mkwrite_device_info(dev_priv);
+       struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv);
        enum pipe pipe;
 
        if (INTEL_GEN(dev_priv) >= 10) {
                for_each_pipe(dev_priv, pipe)
-                       info->num_scalers[pipe] = 2;
-       } else if (IS_GEN9(dev_priv)) {
-               info->num_scalers[PIPE_A] = 2;
-               info->num_scalers[PIPE_B] = 2;
-               info->num_scalers[PIPE_C] = 1;
+                       runtime->num_scalers[pipe] = 2;
+       } else if (IS_GEN(dev_priv, 9)) {
+               runtime->num_scalers[PIPE_A] = 2;
+               runtime->num_scalers[PIPE_B] = 2;
+               runtime->num_scalers[PIPE_C] = 1;
        }
 
        BUILD_BUG_ON(I915_NUM_ENGINES > BITS_PER_TYPE(intel_ring_mask_t));
 
-       if (IS_GEN11(dev_priv))
+       if (IS_GEN(dev_priv, 11))
                for_each_pipe(dev_priv, pipe)
-                       info->num_sprites[pipe] = 6;
-       else if (IS_GEN10(dev_priv) || IS_GEMINILAKE(dev_priv))
+                       runtime->num_sprites[pipe] = 6;
+       else if (IS_GEN(dev_priv, 10) || IS_GEMINILAKE(dev_priv))
                for_each_pipe(dev_priv, pipe)
-                       info->num_sprites[pipe] = 3;
+                       runtime->num_sprites[pipe] = 3;
        else if (IS_BROXTON(dev_priv)) {
                /*
                 * Skylake and Broxton currently don't expose the topmost plane as its
@@ -772,22 +756,22 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
                 * down the line.
                 */
 
-               info->num_sprites[PIPE_A] = 2;
-               info->num_sprites[PIPE_B] = 2;
-               info->num_sprites[PIPE_C] = 1;
+               runtime->num_sprites[PIPE_A] = 2;
+               runtime->num_sprites[PIPE_B] = 2;
+               runtime->num_sprites[PIPE_C] = 1;
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                for_each_pipe(dev_priv, pipe)
-                       info->num_sprites[pipe] = 2;
+                       runtime->num_sprites[pipe] = 2;
        } else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
                for_each_pipe(dev_priv, pipe)
-                       info->num_sprites[pipe] = 1;
+                       runtime->num_sprites[pipe] = 1;
        }
 
        if (i915_modparams.disable_display) {
                DRM_INFO("Display disabled (module parameter)\n");
                info->num_pipes = 0;
        } else if (HAS_DISPLAY(dev_priv) &&
-                  (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
+                  (IS_GEN_RANGE(dev_priv, 7, 8)) &&
                   HAS_PCH_SPLIT(dev_priv)) {
                u32 fuse_strap = I915_READ(FUSE_STRAP);
                u32 sfuse_strap = I915_READ(SFUSE_STRAP);
@@ -811,7 +795,7 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
                        DRM_INFO("PipeC fused off\n");
                        info->num_pipes -= 1;
                }
-       } else if (HAS_DISPLAY(dev_priv) && IS_GEN9(dev_priv)) {
+       } else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
                u32 dfsm = I915_READ(SKL_DFSM);
                u8 disabled_mask = 0;
                bool invalid;
@@ -851,20 +835,20 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
                cherryview_sseu_info_init(dev_priv);
        else if (IS_BROADWELL(dev_priv))
                broadwell_sseu_info_init(dev_priv);
-       else if (IS_GEN9(dev_priv))
+       else if (IS_GEN(dev_priv, 9))
                gen9_sseu_info_init(dev_priv);
-       else if (IS_GEN10(dev_priv))
+       else if (IS_GEN(dev_priv, 10))
                gen10_sseu_info_init(dev_priv);
        else if (INTEL_GEN(dev_priv) >= 11)
                gen11_sseu_info_init(dev_priv);
 
-       if (IS_GEN6(dev_priv) && intel_vtd_active()) {
+       if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
                DRM_INFO("Disabling ppGTT for VT-d support\n");
                info->ppgtt = INTEL_PPGTT_NONE;
        }
 
        /* Initialize command stream timestamp frequency */
-       info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
+       runtime->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
 }
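
The IS_GENx(dev_priv) to IS_GEN(dev_priv, x) / IS_GEN_RANGE(dev_priv, a, b)
conversion running through this function relies on a per-device generation
bitmask, so membership in a whole span of generations is a single AND. A
self-contained toy of that shape; the macro bodies are assumptions for
illustration, not copied from i915_drv.h:

#include <assert.h>
#include <stdint.h>

/* One bit per generation: gen n is bit n-1 of gen_mask. */
struct toy_info { uint16_t gen_mask; };

#define GEN_BIT(n)              (1u << ((n) - 1))
#define GEN_SPAN(a, b)          (GEN_BIT((b) + 1) - GEN_BIT(a))
#define TOY_IS_GEN(info, n)     (!!((info)->gen_mask & GEN_BIT(n)))
#define TOY_IS_GEN_RANGE(info, a, b) \
        (!!((info)->gen_mask & GEN_SPAN((a), (b))))

int main(void)
{
        struct toy_info hsw = { .gen_mask = GEN_BIT(7) };

        assert(TOY_IS_GEN(&hsw, 7));
        assert(TOY_IS_GEN_RANGE(&hsw, 7, 8));   /* the gen7..8 fuse path */
        assert(!TOY_IS_GEN(&hsw, 9));
        return 0;
}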
 
 void intel_driver_caps_print(const struct intel_driver_caps *caps,
@@ -884,35 +868,44 @@ void intel_driver_caps_print(const struct intel_driver_caps *caps,
 void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
 {
        struct intel_device_info *info = mkwrite_device_info(dev_priv);
-       u32 media_fuse;
+       unsigned int logical_vdbox = 0;
        unsigned int i;
+       u32 media_fuse;
 
        if (INTEL_GEN(dev_priv) < 11)
                return;
 
        media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);
 
-       info->vdbox_enable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
-       info->vebox_enable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
-                            GEN11_GT_VEBOX_DISABLE_SHIFT;
+       RUNTIME_INFO(dev_priv)->vdbox_enable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
+       RUNTIME_INFO(dev_priv)->vebox_enable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
+               GEN11_GT_VEBOX_DISABLE_SHIFT;
 
-       DRM_DEBUG_DRIVER("vdbox enable: %04x\n", info->vdbox_enable);
+       DRM_DEBUG_DRIVER("vdbox enable: %04x\n", RUNTIME_INFO(dev_priv)->vdbox_enable);
        for (i = 0; i < I915_MAX_VCS; i++) {
                if (!HAS_ENGINE(dev_priv, _VCS(i)))
                        continue;
 
-               if (!(BIT(i) & info->vdbox_enable)) {
+               if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vdbox_enable)) {
                        info->ring_mask &= ~ENGINE_MASK(_VCS(i));
                        DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
+                       continue;
                }
+
+               /*
+                * In Gen11, only even numbered logical VDBOXes are
+                * hooked up to an SFC (Scaler & Format Converter) unit.
+                */
+               if (logical_vdbox++ % 2 == 0)
+                       RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
        }
 
-       DRM_DEBUG_DRIVER("vebox enable: %04x\n", info->vebox_enable);
+       DRM_DEBUG_DRIVER("vebox enable: %04x\n", RUNTIME_INFO(dev_priv)->vebox_enable);
        for (i = 0; i < I915_MAX_VECS; i++) {
                if (!HAS_ENGINE(dev_priv, _VECS(i)))
                        continue;
 
-               if (!(BIT(i) & info->vebox_enable)) {
+               if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vebox_enable)) {
                        info->ring_mask &= ~ENGINE_MASK(_VECS(i));
                        DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
                }
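
The loop above counts only unfused VDBOXes as "logical" instances and,
per the comment, grants SFC access to the even-numbered ones. A
standalone recomputation of that mask (names illustrative):

#include <stdint.h>
#include <stdio.h>

/* Recompute vdbox_sfc_access as done above: skip fused-off engines,
 * count the remaining ones as logical instances, and give every even
 * logical instance (0, 2, ...) SFC access. */
static uint8_t vdbox_sfc_mask(uint8_t vdbox_enable, int max_vcs)
{
        uint8_t sfc = 0;
        int i, logical = 0;

        for (i = 0; i < max_vcs; i++) {
                if (!(vdbox_enable & (1u << i)))
                        continue;       /* fused off: not a logical vdbox */

                if (logical++ % 2 == 0)
                        sfc |= 1u << i;
        }

        return sfc;
}

int main(void)
{
        /* e.g. vcs0 and vcs2 enabled -> only vcs0 (logical #0) gets SFC */
        printf("0x%x\n", vdbox_sfc_mask(0x5, 4));       /* prints 0x1 */
        return 0;
}
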
index 1caf24e2cf0ba5a7d332ef4add45c085f332128c..e8b8661df746c3317926275310632c0bf05a98b9 100644
@@ -89,6 +89,7 @@ enum intel_ppgtt {
        func(is_alpha_support); \
        /* Keep has_* in alphabetical order */ \
        func(has_64bit_reloc); \
+       func(gpu_reset_clobbers_display); \
        func(has_reset_engine); \
        func(has_fpga_dbg); \
        func(has_guc); \
@@ -114,7 +115,7 @@ enum intel_ppgtt {
        func(has_ddi); \
        func(has_dp_mst); \
        func(has_fbc); \
-       func(has_gmch_display); \
+       func(has_gmch); \
        func(has_hotplug); \
        func(has_ipc); \
        func(has_overlay); \
@@ -152,12 +153,10 @@ struct sseu_dev_info {
 typedef u8 intel_ring_mask_t;
 
 struct intel_device_info {
-       u16 device_id;
        u16 gen_mask;
 
        u8 gen;
        u8 gt; /* GT number, 0 if undefined */
-       u8 num_rings;
        intel_ring_mask_t ring_mask; /* Rings supported by the HW */
 
        enum intel_platform platform;
@@ -169,8 +168,6 @@ struct intel_device_info {
        u32 display_mmio_offset;
 
        u8 num_pipes;
-       u8 num_sprites[I915_MAX_PIPES];
-       u8 num_scalers[I915_MAX_PIPES];
 
 #define DEFINE_FLAG(name) u8 name:1
        DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
@@ -189,6 +186,22 @@ struct intel_device_info {
        int trans_offsets[I915_MAX_TRANSCODERS];
        int cursor_offsets[I915_MAX_PIPES];
 
+       struct color_luts {
+               u16 degamma_lut_size;
+               u16 gamma_lut_size;
+               u32 degamma_lut_tests;
+               u32 gamma_lut_tests;
+       } color;
+};
+
+struct intel_runtime_info {
+       u16 device_id;
+
+       u8 num_sprites[I915_MAX_PIPES];
+       u8 num_scalers[I915_MAX_PIPES];
+
+       u8 num_rings;
+
        /* Slice/subslice/EU info */
        struct sseu_dev_info sseu;
 
@@ -198,10 +211,8 @@ struct intel_device_info {
        u8 vdbox_enable;
        u8 vebox_enable;
 
-       struct color_luts {
-               u16 degamma_lut_size;
-               u16 gamma_lut_size;
-       } color;
+       /* Media engine access to SFC per instance */
+       u8 vdbox_sfc_access;
 };
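
The header change formalizes a split: intel_device_info keeps what is
fixed per PCI ID (offsets, feature flags, color LUT sizes), while fields
discovered at probe time (device id, sprite/scaler counts, ring count,
SSEU and fuse-derived engine masks) move to intel_runtime_info, reached
through the RUNTIME_INFO() accessor seen in the .c hunks above. A toy
model of the arrangement; accessor names and placement are assumptions:

#include <stdint.h>

/* Const per-PCI-ID descriptor vs. writable probe-time block. */
struct toy_static_info  { uint8_t gen; uint8_t num_pipes; };
struct toy_runtime_info { uint16_t device_id; uint8_t num_rings; };

struct toy_device {
        const struct toy_static_info *info;     /* shared, never written */
        struct toy_runtime_info runtime;        /* filled in at probe */
};

#define TOY_INFO(dev)           ((dev)->info)
#define TOY_RUNTIME(dev)        (&(dev)->runtime)

int main(void)
{
        static const struct toy_static_info gen9 = { .gen = 9, .num_pipes = 3 };
        struct toy_device dev = { .info = &gen9 };

        /* probe time: runtime is writable even though info is const */
        TOY_RUNTIME(&dev)->device_id = 0x1234;  /* hypothetical id */
        TOY_RUNTIME(&dev)->num_rings = 5;

        return TOY_INFO(&dev)->num_pipes == 3 ? 0 : 1;
}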
 
 struct intel_driver_caps {
@@ -258,12 +269,10 @@ static inline void sseu_set_eus(struct sseu_dev_info *sseu,
 
 const char *intel_platform_name(enum intel_platform platform);
 
-void intel_device_info_runtime_init(struct intel_device_info *info);
-void intel_device_info_dump(const struct intel_device_info *info,
-                           struct drm_printer *p);
+void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
 void intel_device_info_dump_flags(const struct intel_device_info *info,
                                  struct drm_printer *p);
-void intel_device_info_dump_runtime(const struct intel_device_info *info,
+void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
                                    struct drm_printer *p);
 void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
                                     struct drm_printer *p);
index 3da9c0f9e9485c7c4b9ccf8fefe5c71f72f1ea02..619f1a20cc2db9d320c7b2aa819453fb81ab20df 100644
 #include <linux/slab.h>
 #include <linux/vgaarb.h>
 #include <drm/drm_edid.h>
-#include <drm/drmP.h>
-#include "intel_drv.h"
-#include "intel_frontbuffer.h"
 #include <drm/i915_drm.h>
-#include "i915_drv.h"
-#include "i915_gem_clflush.h"
-#include "intel_dsi.h"
-#include "i915_trace.h"
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_dp_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drm_rect.h>
 #include <drm/drm_atomic_uapi.h>
 #include <linux/intel-iommu.h>
 #include <linux/reservation.h>
 
+#include "intel_drv.h"
+#include "intel_dsi.h"
+#include "intel_frontbuffer.h"
+
+#include "i915_drv.h"
+#include "i915_gem_clflush.h"
+#include "i915_reset.h"
+#include "i915_trace.h"
+
 /* Primary plane formats for gen <= 3 */
-static const uint32_t i8xx_primary_formats[] = {
+static const u32 i8xx_primary_formats[] = {
        DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB1555,
@@ -58,7 +60,7 @@ static const uint32_t i8xx_primary_formats[] = {
 };
 
 /* Primary plane formats for gen >= 4 */
-static const uint32_t i965_primary_formats[] = {
+static const u32 i965_primary_formats[] = {
        DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB8888,
@@ -67,18 +69,18 @@ static const uint32_t i965_primary_formats[] = {
        DRM_FORMAT_XBGR2101010,
 };
 
-static const uint64_t i9xx_format_modifiers[] = {
+static const u64 i9xx_format_modifiers[] = {
        I915_FORMAT_MOD_X_TILED,
        DRM_FORMAT_MOD_LINEAR,
        DRM_FORMAT_MOD_INVALID
 };
 
 /* Cursor formats */
-static const uint32_t intel_cursor_formats[] = {
+static const u32 intel_cursor_formats[] = {
        DRM_FORMAT_ARGB8888,
 };
 
-static const uint64_t cursor_format_modifiers[] = {
+static const u64 cursor_format_modifiers[] = {
        DRM_FORMAT_MOD_LINEAR,
        DRM_FORMAT_MOD_INVALID
 };
@@ -494,7 +496,7 @@ static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
        return clock->dot;
 }
 
-static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
+static u32 i9xx_dpll_compute_m(struct dpll *dpll)
 {
        return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
 }
@@ -529,8 +531,8 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock)
        clock->p = clock->p1 * clock->p2;
        if (WARN_ON(clock->n == 0 || clock->p == 0))
                return 0;
-       clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
-                       clock->n << 22);
+       clock->vco = DIV_ROUND_CLOSEST_ULL((u64)refclk * clock->m,
+                                          clock->n << 22);
        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
 
        return clock->dot / 5;
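
chv_calc_dpll_params() computes the VCO as refclk * m / (n * 2^22) and
the dot clock as VCO / p, both with round-to-nearest division so the
two-stage calculation does not truncate twice. A minimal stand-in for the
rounding helper, matching DIV_ROUND_CLOSEST_ULL() semantics as far as
they are used above:

#include <stdint.h>
#include <stdio.h>

/* 64-bit divide rounded to the nearest integer. */
static uint64_t div_round_closest_u64(uint64_t x, uint32_t d)
{
        return (x + d / 2) / d;
}

int main(void)
{
        /* 7/2 truncates to 3; rounding to nearest gives 4 */
        printf("%llu\n", (unsigned long long)div_round_closest_u64(7, 2));
        return 0;
}
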
@@ -892,7 +894,7 @@ chv_find_best_dpll(const struct intel_limit *limit,
        struct drm_device *dev = crtc->base.dev;
        unsigned int best_error_ppm;
        struct dpll clock;
-       uint64_t m2;
+       u64 m2;
        int found = false;
 
        memset(best_clock, 0, sizeof(*best_clock));
@@ -914,7 +916,7 @@ chv_find_best_dpll(const struct intel_limit *limit,
 
                        clock.p = clock.p1 * clock.p2;
 
-                       m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
+                       m2 = DIV_ROUND_CLOSEST_ULL(((u64)target * clock.p *
                                        clock.n) << 22, refclk * clock.m1);
 
                        if (m2 > INT_MAX/clock.m1)
@@ -984,7 +986,7 @@ static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
        u32 line1, line2;
        u32 line_mask;
 
-       if (IS_GEN2(dev_priv))
+       if (IS_GEN(dev_priv, 2))
                line_mask = DSL_LINEMASK_GEN2;
        else
                line_mask = DSL_LINEMASK_GEN3;
@@ -1110,7 +1112,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
        u32 val;
 
        /* ILK FDI PLL is always enabled */
-       if (IS_GEN5(dev_priv))
+       if (IS_GEN(dev_priv, 5))
                return;
 
        /* On Haswell, DDI ports are responsible for the FDI PLL setup */
@@ -1198,17 +1200,19 @@ void assert_pipe(struct drm_i915_private *dev_priv,
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);
        enum intel_display_power_domain power_domain;
+       intel_wakeref_t wakeref;
 
        /* we keep both pipes enabled on 830 */
        if (IS_I830(dev_priv))
                state = true;
 
        power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
-       if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
+       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+       if (wakeref) {
                u32 val = I915_READ(PIPECONF(cpu_transcoder));
                cur_state = !!(val & PIPECONF_ENABLE);
 
-               intel_display_power_put(dev_priv, power_domain);
+               intel_display_power_put(dev_priv, power_domain, wakeref);
        } else {
                cur_state = false;
        }
@@ -1609,7 +1613,7 @@ static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_s
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        i915_reg_t reg;
-       uint32_t val, pipeconf_val;
+       u32 val, pipeconf_val;
 
        /* Make sure PCH DPLL is enabled */
        assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
@@ -1697,7 +1701,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
                                            enum pipe pipe)
 {
        i915_reg_t reg;
-       uint32_t val;
+       u32 val;
 
        /* FDI relies on the transcoder */
        assert_fdi_tx_disabled(dev_priv, pipe);
@@ -1754,6 +1758,35 @@ enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
                return crtc->pipe;
 }
 
+static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+       /*
+        * On i965gm the hardware frame counter reads
+        * zero when the TV encoder is enabled :(
+        */
+       if (IS_I965GM(dev_priv) &&
+           (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
+               return 0;
+
+       if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+               return 0xffffffff; /* full 32 bit counter */
+       else if (INTEL_GEN(dev_priv) >= 3)
+               return 0xffffff; /* only 24 bits of frame count */
+       else
+               return 0; /* Gen2 doesn't have a hardware frame counter */
+}
+
+static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+
+       drm_crtc_set_max_vblank_count(&crtc->base,
+                                     intel_crtc_max_vblank_count(crtc_state));
+       drm_crtc_vblank_on(&crtc->base);
+}
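
intel_crtc_max_vblank_count() encodes how wide the hardware frame counter
is per platform (full 32 bits on g4x and gen5+, 24 bits on gen3/4, none
on gen2 or on i965gm with TV-out), and intel_crtc_vblank_on() now passes
that to drm_crtc_set_max_vblank_count() so the core can handle
wraparound; a max of 0 makes the core derive counts from timestamps
instead, which is why the enable path below waits for the scanline to
move in that case. A toy of the wrap arithmetic a bounded counter needs
(hypothetical helper, not drm core code):

#include <stdint.h>
#include <stdio.h>

/* With a 24-bit counter, "frames since then" must wrap modulo the
 * counter width; masking with max (a 2^n - 1 value) handles rollover. */
static uint32_t frames_elapsed(uint32_t now, uint32_t then, uint32_t max)
{
        return (now - then) & max;
}

int main(void)
{
        /* counter wrapped from 0xfffffe to 0x000001: 3 frames elapsed */
        printf("%u\n", frames_elapsed(0x000001, 0xfffffe, 0xffffff));
        return 0;
}
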
+
 static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
 {
        struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
@@ -1772,7 +1805,7 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
         * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
         * need the check.
         */
-       if (HAS_GMCH_DISPLAY(dev_priv)) {
+       if (HAS_GMCH(dev_priv)) {
                if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
                        assert_dsi_pll_enabled(dev_priv);
                else
@@ -1806,7 +1839,7 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
         * when it's derived from the timestamps. So let's wait for the
         * pipe to start properly before we call drm_crtc_vblank_on()
         */
-       if (dev_priv->drm.max_vblank_count == 0)
+       if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
                intel_wait_for_pipe_scanline_moving(crtc);
 }
 
@@ -1850,7 +1883,7 @@ static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
 
 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
 {
-       return IS_GEN2(dev_priv) ? 2048 : 4096;
+       return IS_GEN(dev_priv, 2) ? 2048 : 4096;
 }
 
 static unsigned int
@@ -1863,7 +1896,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
        case DRM_FORMAT_MOD_LINEAR:
                return cpp;
        case I915_FORMAT_MOD_X_TILED:
-               if (IS_GEN2(dev_priv))
+               if (IS_GEN(dev_priv, 2))
                        return 128;
                else
                        return 512;
@@ -1872,7 +1905,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
                        return 128;
                /* fall through */
        case I915_FORMAT_MOD_Y_TILED:
-               if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
+               if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
                        return 128;
                else
                        return 512;
@@ -2024,6 +2057,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
        struct drm_device *dev = fb->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+       intel_wakeref_t wakeref;
        struct i915_vma *vma;
        unsigned int pinctl;
        u32 alignment;
@@ -2047,7 +2081,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
         * intel_runtime_pm_put(), so it is correct to wrap only the
         * pin/unpin/fence and not more.
         */
-       intel_runtime_pm_get(dev_priv);
+       wakeref = intel_runtime_pm_get(dev_priv);
 
        atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
 
@@ -2060,7 +2094,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
         * complicated than this. For example, Cherryview appears quite
         * happy to scanout from anywhere within its global aperture.
         */
-       if (HAS_GMCH_DISPLAY(dev_priv))
+       if (HAS_GMCH(dev_priv))
                pinctl |= PIN_MAPPABLE;
 
        vma = i915_gem_object_pin_to_display_plane(obj,
@@ -2102,7 +2136,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
 err:
        atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
 
-       intel_runtime_pm_put(dev_priv);
+       intel_runtime_pm_put(dev_priv, wakeref);
        return vma;
 }
 
@@ -2373,7 +2407,7 @@ static int intel_fb_offset_to_xy(int *x, int *y,
        return 0;
 }
 
-static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier)
+static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
 {
        switch (fb_modifier) {
        case I915_FORMAT_MOD_X_TILED:
@@ -3161,7 +3195,7 @@ i9xx_plane_max_stride(struct intel_plane *plane,
 {
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 
-       if (!HAS_GMCH_DISPLAY(dev_priv)) {
+       if (!HAS_GMCH(dev_priv)) {
                return 32*1024;
        } else if (INTEL_GEN(dev_priv) >= 4) {
                if (modifier == I915_FORMAT_MOD_X_TILED)
@@ -3181,28 +3215,38 @@ i9xx_plane_max_stride(struct intel_plane *plane,
        }
 }
 
+static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       u32 dspcntr = 0;
+
+       dspcntr |= DISPPLANE_GAMMA_ENABLE;
+
+       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+               dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
+
+       if (INTEL_GEN(dev_priv) < 5)
+               dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
+
+       return dspcntr;
+}
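
i9xx_plane_ctl() is being split: the new i9xx_plane_ctl_crtc() builds the
bits that depend only on crtc state (gamma enable, pipe CSC, pipe select)
and the update path below ORs them with the plane-derived bits, so the
disable path can keep programming the crtc half alone; per the comment
further down, those bits also steer the pipe bottom color. A toy of the
two-halves assembly, with invented bit positions:

#include <stdint.h>

/* Crtc-derived and plane-derived control bits are built independently
 * and OR'd at update time; a disabled plane still carries the crtc
 * half. Bit values are illustrative, not hardware ones. */
#define CTL_GAMMA_ENABLE        (1u << 30)      /* crtc half */
#define CTL_PLANE_ENABLE        (1u << 31)      /* plane half */

static uint32_t ctl_crtc_half(int gamma_on)
{
        return gamma_on ? CTL_GAMMA_ENABLE : 0;
}

static uint32_t ctl_plane_half(int plane_on)
{
        return plane_on ? CTL_PLANE_ENABLE : 0;
}

int main(void)
{
        uint32_t enabled  = ctl_crtc_half(1) | ctl_plane_half(1);
        uint32_t disabled = ctl_crtc_half(1);   /* disable path: crtc bits only */

        return (enabled & CTL_PLANE_ENABLE) && !(disabled & CTL_PLANE_ENABLE)
                ? 0 : 1;
}
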
+
 static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
                          const struct intel_plane_state *plane_state)
 {
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->base.plane->dev);
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        const struct drm_framebuffer *fb = plane_state->base.fb;
        unsigned int rotation = plane_state->base.rotation;
        u32 dspcntr;
 
-       dspcntr = DISPLAY_PLANE_ENABLE | DISPPLANE_GAMMA_ENABLE;
+       dspcntr = DISPLAY_PLANE_ENABLE;
 
-       if (IS_G4X(dev_priv) || IS_GEN5(dev_priv) ||
-           IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
+       if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
+           IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
                dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
 
-       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-               dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
-
-       if (INTEL_GEN(dev_priv) < 5)
-               dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
-
        switch (fb->format->format) {
        case DRM_FORMAT_C8:
                dspcntr |= DISPPLANE_8BPP;
@@ -3330,11 +3374,13 @@ static void i9xx_update_plane(struct intel_plane *plane,
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
        u32 linear_offset;
-       u32 dspcntr = plane_state->ctl;
        int x = plane_state->color_plane[0].x;
        int y = plane_state->color_plane[0].y;
        unsigned long irqflags;
        u32 dspaddr_offset;
+       u32 dspcntr;
+
+       dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
 
        linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
 
@@ -3394,10 +3440,23 @@ static void i9xx_disable_plane(struct intel_plane *plane,
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
        unsigned long irqflags;
+       u32 dspcntr;
+
+       /*
+        * DSPCNTR pipe gamma enable on g4x+ and pipe csc
+        * enable on ilk+ affect the pipe bottom color as
+        * well, so we must configure them even if the plane
+        * is disabled.
+        *
+        * On pre-g4x there is no way to gamma correct the
+        * pipe bottom color but we'll keep on doing this
+        * anyway.
+        */
+       dspcntr = i9xx_plane_ctl_crtc(crtc_state);
 
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
-       I915_WRITE_FW(DSPCNTR(i9xx_plane), 0);
+       I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
        if (INTEL_GEN(dev_priv) >= 4)
                I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
        else
@@ -3412,6 +3471,7 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum intel_display_power_domain power_domain;
        enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+       intel_wakeref_t wakeref;
        bool ret;
        u32 val;
 
@@ -3421,7 +3481,8 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
         * display power wells.
         */
        power_domain = POWER_DOMAIN_PIPE(plane->pipe);
-       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+       if (!wakeref)
                return false;
 
        val = I915_READ(DSPCNTR(i9xx_plane));
@@ -3434,7 +3495,7 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
                *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
                        DISPPLANE_SEL_PIPE_SHIFT;
 
-       intel_display_power_put(dev_priv, power_domain);
+       intel_display_power_put(dev_priv, power_domain, wakeref);
 
        return ret;
 }
@@ -3503,7 +3564,7 @@ u32 skl_plane_stride(const struct intel_plane_state *plane_state,
        return stride / skl_plane_stride_mult(fb, color_plane, rotation);
 }
 
-static u32 skl_plane_ctl_format(uint32_t pixel_format)
+static u32 skl_plane_ctl_format(u32 pixel_format)
 {
        switch (pixel_format) {
        case DRM_FORMAT_C8:
@@ -3573,7 +3634,7 @@ static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state
        }
 }
 
-static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
+static u32 skl_plane_ctl_tiling(u64 fb_modifier)
 {
        switch (fb_modifier) {
        case DRM_FORMAT_MOD_LINEAR:
@@ -3632,6 +3693,20 @@ static u32 cnl_plane_ctl_flip(unsigned int reflect)
        return 0;
 }
 
+u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+       u32 plane_ctl = 0;
+
+       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+               return plane_ctl;
+
+       plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
+       plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
+
+       return plane_ctl;
+}
+
 u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
                  const struct intel_plane_state *plane_state)
 {
@@ -3646,10 +3721,7 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
 
        if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
                plane_ctl |= skl_plane_ctl_alpha(plane_state);
-               plane_ctl |=
-                       PLANE_CTL_PIPE_GAMMA_ENABLE |
-                       PLANE_CTL_PIPE_CSC_ENABLE |
-                       PLANE_CTL_PLANE_GAMMA_DISABLE;
+               plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
 
                if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
                        plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;
@@ -3674,19 +3746,27 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
        return plane_ctl;
 }
 
+u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+       u32 plane_color_ctl = 0;
+
+       if (INTEL_GEN(dev_priv) >= 11)
+               return plane_color_ctl;
+
+       plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
+       plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
+
+       return plane_color_ctl;
+}
+
 u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
                        const struct intel_plane_state *plane_state)
 {
-       struct drm_i915_private *dev_priv =
-               to_i915(plane_state->base.plane->dev);
        const struct drm_framebuffer *fb = plane_state->base.fb;
        struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
        u32 plane_color_ctl = 0;
 
-       if (INTEL_GEN(dev_priv) < 11) {
-               plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
-               plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
-       }
        plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
        plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
 
@@ -3735,7 +3815,7 @@ __intel_display_resume(struct drm_device *dev,
        }
 
        /* ignore any reset values/BIOS leftovers in the WM registers */
-       if (!HAS_GMCH_DISPLAY(to_i915(dev)))
+       if (!HAS_GMCH(to_i915(dev)))
                to_intel_atomic_state(state)->skip_intermediate_wm = true;
 
        ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
@@ -3746,8 +3826,8 @@ __intel_display_resume(struct drm_device *dev,
 
 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
 {
-       return intel_has_gpu_reset(dev_priv) &&
-               INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv);
+       return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
+               intel_has_gpu_reset(dev_priv));
 }
 
 void intel_prepare_reset(struct drm_i915_private *dev_priv)
@@ -3860,6 +3940,30 @@ unlock:
        clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
 }
 
+static void icl_set_pipe_chicken(struct intel_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+       u32 tmp;
+
+       tmp = I915_READ(PIPE_CHICKEN(pipe));
+
+       /*
+        * Display WA #1153: icl
+        * enable hardware to bypass the alpha math
+        * and rounding for per-pixel values 00 and 0xff
+        */
+       tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
+
+       /*
+        * W/A for underruns with linear/X-tiled with
+        * WM1+ disabled.
+        */
+       tmp |= PM_FILL_MAINTAIN_DBUF_FULLNESS;
+
+       I915_WRITE(PIPE_CHICKEN(pipe), tmp);
+}
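
icl_set_pipe_chicken() centralizes two icelake workaround bits behind a single read-modify-write of PIPE_CHICKEN, replacing the open-coded Display WA #1153 handling that a later hunk removes from haswell_crtc_enable(). A sketch of the read-modify-write shape with stand-in MMIO accessors (register offset and bit positions invented for illustration):

#include <stdint.h>

extern uint32_t mmio_read(uint32_t reg);                /* stand-ins, not the */
extern void mmio_write(uint32_t reg, uint32_t val);     /* real i915 helpers  */

#define PIPE_CHICKEN_REG(pipe)          (0x70038 + (pipe) * 0x1000)
#define PER_PIXEL_ALPHA_BYPASS          (1u << 7)
#define FILL_MAINTAIN_DBUF_FULLNESS     (1u << 0)

static void set_pipe_chicken_sketch(int pipe)
{
        uint32_t tmp = mmio_read(PIPE_CHICKEN_REG(pipe));

        /* OR in both workaround bits unconditionally: re-setting an
         * already-set bit is harmless, so no read-back guard is needed. */
        tmp |= PER_PIXEL_ALPHA_BYPASS | FILL_MAINTAIN_DBUF_FULLNESS;
        mmio_write(PIPE_CHICKEN_REG(pipe), tmp);
}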
+
 static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
                                     const struct intel_crtc_state *new_crtc_state)
 {
@@ -3894,6 +3998,19 @@ static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_sta
                else if (old_crtc_state->pch_pfit.enabled)
                        ironlake_pfit_disable(old_crtc_state);
        }
+
+       /*
+        * We don't (yet) allow userspace to control the pipe background color,
+        * so force it to black, but apply pipe gamma and CSC so that its
+        * handling will match how we program our planes.
+        */
+       if (INTEL_GEN(dev_priv) >= 9)
+               I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
+                          SKL_BOTTOM_COLOR_GAMMA_ENABLE |
+                          SKL_BOTTOM_COLOR_CSC_ENABLE);
+
+       if (INTEL_GEN(dev_priv) >= 11)
+               icl_set_pipe_chicken(crtc);
 }
 
 static void intel_fdi_normal_train(struct intel_crtc *crtc)
@@ -4120,7 +4237,7 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_2;
-       if (IS_GEN6(dev_priv)) {
+       if (IS_GEN(dev_priv, 6)) {
                temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
                /* SNB-B */
                temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
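
The IS_GENx() predicates are converted to IS_GEN(dev_priv, n) here and to IS_GEN_RANGE() in later hunks. Both forms can be serviced by one AND against a per-device generation bitmask instead of integer comparisons; a self-contained sketch of that trick (macros suffixed _SKETCH to mark them as illustrations, not the i915 definitions):

#include <stdint.h>
#include <stdio.h>

#define GEN_BIT(n)      (1u << (n))
/* Mask with bits s..e set, assuming 0 <= s <= e <= 31. */
#define GEN_MASK(s, e)  ((~0u >> (31 - (e))) & ~(GEN_BIT(s) - 1))

#define IS_GEN_SKETCH(mask, n)          (!!((mask) & GEN_BIT(n)))
#define IS_GEN_RANGE_SKETCH(mask, s, e) (!!((mask) & GEN_MASK(s, e)))

int main(void)
{
        uint32_t gen_mask = GEN_BIT(6); /* device claims gen 6 */

        printf("%d\n", IS_GEN_SKETCH(gen_mask, 6));             /* 1 */
        printf("%d\n", IS_GEN_RANGE_SKETCH(gen_mask, 5, 6));    /* 1 */
        printf("%d\n", IS_GEN_RANGE_SKETCH(gen_mask, 7, 9));    /* 0 */
        return 0;
}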
@@ -4593,7 +4710,7 @@ static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *c
 
 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
 {
-       uint32_t temp;
+       u32 temp;
 
        temp = I915_READ(SOUTH_CHICKEN1);
        if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
@@ -4919,10 +5036,10 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
        /* range checks */
        if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
            dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
-           (IS_GEN11(dev_priv) &&
+           (IS_GEN(dev_priv, 11) &&
             (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
              dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
-           (!IS_GEN11(dev_priv) &&
+           (!IS_GEN(dev_priv, 11) &&
             (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
              dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
                DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
@@ -5213,7 +5330,7 @@ intel_post_enable_primary(struct drm_crtc *crtc,
         * FIXME: Need to fix the logic to work when we turn off all planes
         * but leave the pipe running.
         */
-       if (IS_GEN2(dev_priv))
+       if (IS_GEN(dev_priv, 2))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
        /* Underruns don't always raise interrupts, so check manually. */
@@ -5234,7 +5351,7 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
         * Gen2 reports pipe underruns whenever all planes are disabled.
         * So disable underrun reporting before all the planes get disabled.
         */
-       if (IS_GEN2(dev_priv))
+       if (IS_GEN(dev_priv, 2))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 
        hsw_disable_ips(to_intel_crtc_state(crtc->state));
@@ -5248,7 +5365,7 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
         * event which is after the vblank start event, so we need to have a
         * wait-for-vblank between disabling the plane and the pipe.
         */
-       if (HAS_GMCH_DISPLAY(dev_priv) &&
+       if (HAS_GMCH(dev_priv) &&
            intel_set_memory_cxsr(dev_priv, false))
                intel_wait_for_vblank(dev_priv, pipe);
 }
@@ -5256,24 +5373,54 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
                                       const struct intel_crtc_state *new_crtc_state)
 {
+       struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
        if (!old_crtc_state->ips_enabled)
                return false;
 
        if (needs_modeset(&new_crtc_state->base))
                return true;
 
+       /*
+        * Workaround: Do not read or write the pipe palette/gamma data while
+        * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
+        *
+        * Disable IPS before we program the LUT.
+        */
+       if (IS_HASWELL(dev_priv) &&
+           (new_crtc_state->base.color_mgmt_changed ||
+            new_crtc_state->update_pipe) &&
+           new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
+               return true;
+
        return !new_crtc_state->ips_enabled;
 }
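
hsw_pre_update_disable_ips() and its post-update counterpart below now also fence LUT programming: when the split-gamma restriction applies, IPS is forced off before the palette write and back on afterwards. A sketch of that bracketing pattern with stand-in hooks (names illustrative, not the i915 API):

struct lut_commit_state {
        int ips_enabled;
        int split_gamma;
        int lut_changed;
};

extern void ips_disable(void);  /* stand-ins for the real */
extern void ips_enable(void);   /* enable/disable paths   */
extern void program_lut(void);

static void commit_lut(const struct lut_commit_state *s)
{
        /* The hardware restriction: no palette access while split
         * gamma is selected and IPS is running. */
        int fence = s->ips_enabled && s->lut_changed && s->split_gamma;

        if (fence)
                ips_disable();  /* pre-update hook returned true */
        program_lut();
        if (fence)
                ips_enable();   /* post-update hook returned true */
}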
 
 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
                                       const struct intel_crtc_state *new_crtc_state)
 {
+       struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
        if (!new_crtc_state->ips_enabled)
                return false;
 
        if (needs_modeset(&new_crtc_state->base))
                return true;
 
+       /*
+        * Workaround: Do not read or write the pipe palette/gamma data while
+        * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
+        *
+        * Re-enable IPS after the LUT has been programmed.
+        */
+       if (IS_HASWELL(dev_priv) &&
+           (new_crtc_state->base.color_mgmt_changed ||
+            new_crtc_state->update_pipe) &&
+           new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
+               return true;
+
        /*
         * We can't read out IPS on broadwell, assume the worst and
         * forcibly enable IPS on the first fastset.
@@ -5292,7 +5439,7 @@ static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
                return false;
 
        /* WA Display #0827: Gen9:all */
-       if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
+       if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
                return true;
 
        return false;
@@ -5365,7 +5512,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
                 * Gen2 reports pipe underruns whenever all planes are disabled.
                 * So disable underrun reporting before all the planes get disabled.
                 */
-               if (IS_GEN2(dev_priv) && old_primary_state->visible &&
+               if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
                    (modeset || !new_primary_state->base.visible))
                        intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
        }
@@ -5385,7 +5532,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
         * event which is after the vblank start event, so we need to have a
         * wait-for-vblank between disabling the plane and the pipe.
         */
-       if (HAS_GMCH_DISPLAY(dev_priv) && old_crtc_state->base.active &&
+       if (HAS_GMCH(dev_priv) && old_crtc_state->base.active &&
            pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
                intel_wait_for_vblank(dev_priv, crtc->pipe);
 
@@ -5578,6 +5725,26 @@ static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
        }
 }
 
+static void intel_encoders_update_pipe(struct drm_crtc *crtc,
+                                      struct intel_crtc_state *crtc_state,
+                                      struct drm_atomic_state *old_state)
+{
+       struct drm_connector_state *conn_state;
+       struct drm_connector *conn;
+       int i;
+
+       for_each_new_connector_in_state(old_state, conn, conn_state, i) {
+               struct intel_encoder *encoder =
+                       to_intel_encoder(conn_state->best_encoder);
+
+               if (conn_state->crtc != crtc)
+                       continue;
+
+               if (encoder->update_pipe)
+                       encoder->update_pipe(encoder, crtc_state, conn_state);
+       }
+}
+
 static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
                                 struct drm_atomic_state *old_state)
 {
@@ -5641,7 +5808,8 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
         * On ILK+ LUT must be loaded before the pipe is running but with
         * clocks enabled
         */
-       intel_color_load_luts(&pipe_config->base);
+       intel_color_load_luts(pipe_config);
+       intel_color_commit(pipe_config);
 
        if (dev_priv->display.initial_watermarks != NULL)
                dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
@@ -5651,7 +5819,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
                ironlake_pch_enable(old_intel_state, pipe_config);
 
        assert_vblank_disabled(crtc);
-       drm_crtc_vblank_on(crtc);
+       intel_crtc_vblank_on(pipe_config);
 
        intel_encoders_enable(crtc, pipe_config, old_state);
 
@@ -5696,7 +5864,7 @@ static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
 {
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
-       uint32_t val;
+       u32 val;
 
        val = MBUS_DBOX_A_CREDIT(2);
        val |= MBUS_DBOX_BW_CREDIT(1);
@@ -5716,7 +5884,6 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
        struct intel_atomic_state *old_intel_state =
                to_intel_atomic_state(old_state);
        bool psl_clkgate_wa;
-       u32 pipe_chicken;
 
        if (WARN_ON(intel_crtc->active))
                return;
@@ -5752,8 +5919,6 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
 
        haswell_set_pipemisc(pipe_config);
 
-       intel_color_set_csc(&pipe_config->base);
-
        intel_crtc->active = true;
 
        /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
@@ -5771,18 +5936,11 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
         * On ILK+ LUT must be loaded before the pipe is running but with
         * clocks enabled
         */
-       intel_color_load_luts(&pipe_config->base);
+       intel_color_load_luts(pipe_config);
+       intel_color_commit(pipe_config);
 
-       /*
-        * Display WA #1153: enable hardware to bypass the alpha math
-        * and rounding for per-pixel values 00 and 0xff
-        */
-       if (INTEL_GEN(dev_priv) >= 11) {
-               pipe_chicken = I915_READ(PIPE_CHICKEN(pipe));
-               if (!(pipe_chicken & PER_PIXEL_ALPHA_BYPASS_EN))
-                       I915_WRITE_FW(PIPE_CHICKEN(pipe),
-                                     pipe_chicken | PER_PIXEL_ALPHA_BYPASS_EN);
-       }
+       if (INTEL_GEN(dev_priv) >= 11)
+               icl_set_pipe_chicken(intel_crtc);
 
        intel_ddi_set_pipe_settings(pipe_config);
        if (!transcoder_is_dsi(cpu_transcoder))
@@ -5805,7 +5963,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
                intel_ddi_set_vc_payload_alloc(pipe_config, true);
 
        assert_vblank_disabled(crtc);
-       drm_crtc_vblank_on(crtc);
+       intel_crtc_vblank_on(pipe_config);
 
        intel_encoders_enable(crtc, pipe_config, old_state);
 
@@ -6087,7 +6245,7 @@ static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
        enum intel_display_power_domain domain;
 
        for_each_power_domain(domain, domains)
-               intel_display_power_put(dev_priv, domain);
+               intel_display_power_put_unchecked(dev_priv, domain);
 }
 
 static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
@@ -6117,8 +6275,6 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
 
        i9xx_set_pipeconf(pipe_config);
 
-       intel_color_set_csc(&pipe_config->base);
-
        intel_crtc->active = true;
 
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
@@ -6137,14 +6293,15 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
 
        i9xx_pfit_enable(pipe_config);
 
-       intel_color_load_luts(&pipe_config->base);
+       intel_color_load_luts(pipe_config);
+       intel_color_commit(pipe_config);
 
        dev_priv->display.initial_watermarks(old_intel_state,
                                             pipe_config);
        intel_enable_pipe(pipe_config);
 
        assert_vblank_disabled(crtc);
-       drm_crtc_vblank_on(crtc);
+       intel_crtc_vblank_on(pipe_config);
 
        intel_encoders_enable(crtc, pipe_config, old_state);
 }
@@ -6184,7 +6341,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
 
        intel_crtc->active = true;
 
-       if (!IS_GEN2(dev_priv))
+       if (!IS_GEN(dev_priv, 2))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
        intel_encoders_pre_enable(crtc, pipe_config, old_state);
@@ -6193,7 +6350,8 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
 
        i9xx_pfit_enable(pipe_config);
 
-       intel_color_load_luts(&pipe_config->base);
+       intel_color_load_luts(pipe_config);
+       intel_color_commit(pipe_config);
 
        if (dev_priv->display.initial_watermarks != NULL)
                dev_priv->display.initial_watermarks(old_intel_state,
@@ -6203,7 +6361,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
        intel_enable_pipe(pipe_config);
 
        assert_vblank_disabled(crtc);
-       drm_crtc_vblank_on(crtc);
+       intel_crtc_vblank_on(pipe_config);
 
        intel_encoders_enable(crtc, pipe_config, old_state);
 }
@@ -6236,7 +6394,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
         * On gen2 planes are double buffered but the pipe isn't, so we must
         * wait for planes to fully turn off before disabling the pipe.
         */
-       if (IS_GEN2(dev_priv))
+       if (IS_GEN(dev_priv, 2))
                intel_wait_for_vblank(dev_priv, pipe);
 
        intel_encoders_disable(crtc, old_crtc_state, old_state);
@@ -6261,7 +6419,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
 
        intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
 
-       if (!IS_GEN2(dev_priv))
+       if (!IS_GEN(dev_priv, 2))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 
        if (!dev_priv->display.initial_watermarks)
@@ -6334,7 +6492,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
 
        domains = intel_crtc->enabled_power_domains;
        for_each_power_domain(domain, domains)
-               intel_display_power_put(dev_priv, domain);
+               intel_display_power_put_unchecked(dev_priv, domain);
        intel_crtc->enabled_power_domains = 0;
 
        dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
@@ -6600,9 +6758,9 @@ static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
                (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
 }
 
-static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
+static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
 {
-       uint32_t pixel_rate;
+       u32 pixel_rate;
 
        pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
 
@@ -6612,8 +6770,8 @@ static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
         */
 
        if (pipe_config->pch_pfit.enabled) {
-               uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
-               uint32_t pfit_size = pipe_config->pch_pfit.size;
+               u64 pipe_w, pipe_h, pfit_w, pfit_h;
+               u32 pfit_size = pipe_config->pch_pfit.size;
 
                pipe_w = pipe_config->pipe_src_w;
                pipe_h = pipe_config->pipe_src_h;
@@ -6628,7 +6786,7 @@ static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
                if (WARN_ON(!pfit_w || !pfit_h))
                        return pixel_rate;
 
-               pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
+               pixel_rate = div_u64((u64)pixel_rate * pipe_w * pipe_h,
                                     pfit_w * pfit_h);
        }
 
@@ -6639,7 +6797,7 @@ static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
 {
        struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
 
-       if (HAS_GMCH_DISPLAY(dev_priv))
+       if (HAS_GMCH(dev_priv))
                /* FIXME calculate proper pipe pixel rate for GMCH pfit */
                crtc_state->pixel_rate =
                        crtc_state->base.adjusted_mode.crtc_clock;
@@ -6724,7 +6882,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
 }
 
 static void
-intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
+intel_reduce_m_n_ratio(u32 *num, u32 *den)
 {
        while (*num > DATA_LINK_M_N_MASK ||
               *den > DATA_LINK_M_N_MASK) {
@@ -6734,7 +6892,7 @@ intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
 }
 
 static void compute_m_n(unsigned int m, unsigned int n,
-                       uint32_t *ret_m, uint32_t *ret_n,
+                       u32 *ret_m, u32 *ret_n,
                        bool constant_n)
 {
        /*
@@ -6749,7 +6907,7 @@ static void compute_m_n(unsigned int m, unsigned int n,
        else
                *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
 
-       *ret_m = div_u64((uint64_t) m * *ret_n, n);
+       *ret_m = div_u64((u64)m * *ret_n, n);
        intel_reduce_m_n_ratio(ret_m, ret_n);
 }
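
compute_m_n() picks N according to the constant_n flag (else n rounded up to a power of two and clamped), scales M to preserve the ratio, and then intel_reduce_m_n_ratio() halves both until they fit the register fields. A self-contained sketch of the reduction step (the 24-bit mask value is assumed from the field width):

#include <stdint.h>
#include <stdio.h>

#define LINK_M_N_MASK 0xffffff  /* assumed 24-bit register field */

/* Halving both terms preserves the ratio (to within rounding) while
 * guaranteeing both values eventually fit their fields. */
static void reduce_m_n_ratio(uint32_t *num, uint32_t *den)
{
        while (*num > LINK_M_N_MASK || *den > LINK_M_N_MASK) {
                *num >>= 1;
                *den >>= 1;
        }
}

int main(void)
{
        uint32_t m = 0x12345678, n = 0x0fedcba9;

        reduce_m_n_ratio(&m, &n);
        printf("m=0x%06x n=0x%06x\n", m, n);    /* both now fit 24 bits */
        return 0;
}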
 
@@ -6779,12 +6937,12 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
                && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
 }
 
-static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
+static u32 pnv_dpll_compute_fp(struct dpll *dpll)
 {
        return (1 << dpll->n) << 16 | dpll->m2;
 }
 
-static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
+static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
 {
        return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
 }
@@ -6868,7 +7026,7 @@ static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
         * Strictly speaking some registers are available before
         * gen7, but we only support DRRS on gen7+
         */
-       return IS_GEN7(dev_priv) || IS_CHERRYVIEW(dev_priv);
+       return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
 }
 
 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
@@ -7340,7 +7498,7 @@ static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
        enum pipe pipe = crtc->pipe;
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
        const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
-       uint32_t crtc_vtotal, crtc_vblank_end;
+       u32 crtc_vtotal, crtc_vblank_end;
        int vsyncshift = 0;
 
        /* We need to be careful not to change the adjusted mode, for otherwise
@@ -7415,7 +7573,7 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
-       uint32_t tmp;
+       u32 tmp;
 
        tmp = I915_READ(HTOTAL(cpu_transcoder));
        pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
@@ -7486,7 +7644,7 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
 {
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       uint32_t pipeconf;
+       u32 pipeconf;
 
        pipeconf = 0;
 
@@ -7731,7 +7889,7 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
                                 struct intel_crtc_state *pipe_config)
 {
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       uint32_t tmp;
+       u32 tmp;
 
        if (INTEL_GEN(dev_priv) <= 3 &&
            (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
@@ -7946,11 +8104,13 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
 {
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum intel_display_power_domain power_domain;
-       uint32_t tmp;
+       intel_wakeref_t wakeref;
+       u32 tmp;
        bool ret;
 
        power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
-       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+       if (!wakeref)
                return false;
 
        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
@@ -8051,7 +8211,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
        ret = true;
 
 out:
-       intel_display_power_put(dev_priv, power_domain);
+       intel_display_power_put(dev_priv, power_domain, wakeref);
 
        return ret;
 }
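
This is the recurring pattern of the series: intel_display_power_get_if_enabled() now returns an intel_wakeref_t cookie, and intel_display_power_put() wants that cookie back, so get/put pairs can be traced to their call sites; paths that cannot carry the cookie switch to intel_display_power_put_unchecked(). A simplified sketch of the cookie idea (a counter standing in for the real reference tracker):

#include <assert.h>
#include <stdint.h>

typedef uintptr_t wakeref_t;    /* 0 means "not acquired" */

static int domain_enabled = 1;  /* stand-in for the power domain state */
static wakeref_t next_cookie = 1;

static wakeref_t power_get_if_enabled(void)
{
        if (!domain_enabled)
                return 0;       /* caller must check for failure */
        return next_cookie++;   /* a real tracker records the caller here */
}

static void power_put(wakeref_t wakeref)
{
        assert(wakeref);        /* pairing get and put catches leaks */
}

static int read_hw_state(void)
{
        wakeref_t wakeref = power_get_if_enabled();

        if (!wakeref)
                return 0;       /* powered down: nothing to read out */
        /* ... read registers here ... */
        power_put(wakeref);
        return 1;
}

int main(void)
{
        return !read_hw_state();
}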
@@ -8225,7 +8385,7 @@ static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
 
 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
 {
-       uint32_t tmp;
+       u32 tmp;
 
        tmp = I915_READ(SOUTH_CHICKEN2);
        tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
@@ -8247,7 +8407,7 @@ static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
 /* WaMPhyProgramming:hsw */
 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
 {
-       uint32_t tmp;
+       u32 tmp;
 
        tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
        tmp &= ~(0xFF << 24);
@@ -8328,7 +8488,7 @@ static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
 static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
                                 bool with_spread, bool with_fdi)
 {
-       uint32_t reg, tmp;
+       u32 reg, tmp;
 
        if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
                with_spread = true;
@@ -8367,7 +8527,7 @@ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
 /* Sequence to disable CLKOUT_DP */
 static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
 {
-       uint32_t reg, tmp;
+       u32 reg, tmp;
 
        mutex_lock(&dev_priv->sb_lock);
 
@@ -8392,7 +8552,7 @@ static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
 
 #define BEND_IDX(steps) ((50 + (steps)) / 5)
 
-static const uint16_t sscdivintphase[] = {
+static const u16 sscdivintphase[] = {
        [BEND_IDX( 50)] = 0x3B23,
        [BEND_IDX( 45)] = 0x3B23,
        [BEND_IDX( 40)] = 0x3C23,
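BEND_IDX() maps a bend amount given in multiples of 5 onto sscdivintphase[] table indices; with steps in -50..+50 the indices span 0..20, and the WARN_ON in the function below rejects anything not a multiple of 5. A quick self-contained check of the mapping:

#include <assert.h>
#include <stdio.h>

#define BEND_IDX(steps) ((50 + (steps)) / 5)    /* same as above */

int main(void)
{
        int steps;

        for (steps = -50; steps <= 50; steps += 5) {
                assert(steps % 5 == 0);
                assert(BEND_IDX(steps) >= 0 && BEND_IDX(steps) <= 20);
                printf("steps %4d -> idx %2d\n", steps, BEND_IDX(steps));
        }
        return 0;
}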
@@ -8424,7 +8584,7 @@ static const uint16_t sscdivintphase[] = {
  */
 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
 {
-       uint32_t tmp;
+       u32 tmp;
        int idx = BEND_IDX(steps);
 
        if (WARN_ON(steps % 5 != 0))
@@ -8490,7 +8650,7 @@ static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
-       uint32_t val;
+       u32 val;
 
        val = 0;
 
@@ -8837,7 +8997,7 @@ static void skylake_get_pfit_config(struct intel_crtc *crtc,
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
-       uint32_t ps_ctrl = 0;
+       u32 ps_ctrl = 0;
        int id = -1;
        int i;
 
@@ -8849,6 +9009,7 @@ static void skylake_get_pfit_config(struct intel_crtc *crtc,
                        pipe_config->pch_pfit.enabled = true;
                        pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
                        pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
+                       scaler_state->scalers[i].in_use = true;
                        break;
                }
        }
@@ -8993,7 +9154,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
-       uint32_t tmp;
+       u32 tmp;
 
        tmp = I915_READ(PF_CTL(crtc->pipe));
 
@@ -9005,7 +9166,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
                /* We currently do not free assignments of panel fitters on
                 * ivb/hsw (since we don't use the higher upscaling modes which
                 * differentiate them) so just WARN about this case for now. */
-               if (IS_GEN7(dev_priv)) {
+               if (IS_GEN(dev_priv, 7)) {
                        WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
                                PF_PIPE_SEL_IVB(crtc->pipe));
                }
@@ -9018,11 +9179,13 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
-       uint32_t tmp;
+       intel_wakeref_t wakeref;
+       u32 tmp;
        bool ret;
 
        power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
-       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+       if (!wakeref)
                return false;
 
        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
@@ -9105,7 +9268,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
        ret = true;
 
 out:
-       intel_display_power_put(dev_priv, power_domain);
+       intel_display_power_put(dev_priv, power_domain, wakeref);
 
        return ret;
 }
@@ -9145,7 +9308,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
        I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
 }
 
-static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
+static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
 {
        if (IS_HASWELL(dev_priv))
                return I915_READ(D_COMP_HSW);
@@ -9153,7 +9316,7 @@ static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
                return I915_READ(D_COMP_BDW);
 }
 
-static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
+static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
 {
        if (IS_HASWELL(dev_priv)) {
                mutex_lock(&dev_priv->pcu_lock);
@@ -9178,7 +9341,7 @@ static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
                              bool switch_to_fclk, bool allow_power_down)
 {
-       uint32_t val;
+       u32 val;
 
        assert_can_disable_lcpll(dev_priv);
 
@@ -9225,7 +9388,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
  */
 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 {
-       uint32_t val;
+       u32 val;
 
        val = I915_READ(LCPLL_CTL);
 
@@ -9300,7 +9463,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
  */
 void hsw_enable_pc8(struct drm_i915_private *dev_priv)
 {
-       uint32_t val;
+       u32 val;
 
        DRM_DEBUG_KMS("Enabling package C8+\n");
 
@@ -9316,7 +9479,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
 
 void hsw_disable_pc8(struct drm_i915_private *dev_priv)
 {
-       uint32_t val;
+       u32 val;
 
        DRM_DEBUG_KMS("Disabling package C8+\n");
 
@@ -9384,7 +9547,7 @@ static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
                if (WARN_ON(!intel_dpll_is_combophy(id)))
                        return;
        } else if (intel_port_is_tc(dev_priv, port)) {
-               id = icl_port_to_mg_pll_id(port);
+               id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, port));
        } else {
                WARN(1, "Invalid port %x\n", port);
                return;
@@ -9438,7 +9601,7 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
                                struct intel_crtc_state *pipe_config)
 {
        enum intel_dpll_id id;
-       uint32_t ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
+       u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
 
        switch (ddi_pll_sel) {
        case PORT_CLK_SEL_WRPLL1:
@@ -9495,7 +9658,9 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
         * XXX: Do intel_display_power_get_if_enabled before reading this (for
         * consistency and less surprising code; it's in always-on power).
         */
-       for_each_set_bit(panel_transcoder, &panel_transcoder_mask, 32) {
+       for_each_set_bit(panel_transcoder,
+                        &panel_transcoder_mask,
+                        ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
                enum pipe trans_pipe;
 
                tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
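
Bounding the bit walk by ARRAY_SIZE(...trans_offsets) instead of a literal 32 ties the iteration to the number of transcoders the device info actually describes, so a stray high bit in the mask can no longer index past the offsets table. A sketch of the bounded walk (plain loop instead of the kernel's for_each_set_bit helper, table contents invented):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
        unsigned int trans_offsets[5] = {
                0x60000, 0x61000, 0x62000, 0x63000, 0x64000
        };
        unsigned long mask = (1ul << 1) | (1ul << 4) | (1ul << 9);
        unsigned int bit;

        /* Bit 9 is silently ignored rather than dereferencing
         * trans_offsets[9], which does not exist. */
        for (bit = 0; bit < ARRAY_SIZE(trans_offsets); bit++)
                if (mask & (1ul << bit))
                        printf("transcoder %u at 0x%x\n", bit,
                               trans_offsets[bit]);
        return 0;
}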
@@ -9541,6 +9706,8 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
        power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
        if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;
+
+       WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
        *power_domain_mask |= BIT_ULL(power_domain);
 
        tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
@@ -9568,6 +9735,8 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
                power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
                if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                        continue;
+
+               WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
                *power_domain_mask |= BIT_ULL(power_domain);
 
                /*
@@ -9602,7 +9771,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_shared_dpll *pll;
        enum port port;
-       uint32_t tmp;
+       u32 tmp;
 
        tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
 
@@ -9684,7 +9853,9 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 
        power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
        if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
+               WARN_ON(power_domain_mask & BIT_ULL(power_domain));
                power_domain_mask |= BIT_ULL(power_domain);
+
                if (INTEL_GEN(dev_priv) >= 9)
                        skylake_get_pfit_config(crtc, pipe_config);
                else
@@ -9714,7 +9885,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 
 out:
        for_each_power_domain(power_domain, power_domain_mask)
-               intel_display_power_put(dev_priv, power_domain);
+               intel_display_power_put_unchecked(dev_priv, power_domain);
 
        return active;
 }
@@ -9735,7 +9906,7 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
        base += plane_state->color_plane[0].offset;
 
        /* ILK+ do this automagically */
-       if (HAS_GMCH_DISPLAY(dev_priv) &&
+       if (HAS_GMCH(dev_priv) &&
            plane_state->base.rotation & DRM_MODE_ROTATE_180)
                base += (plane_state->base.crtc_h *
                         plane_state->base.crtc_w - 1) * fb->format->cpp[0];
@@ -9848,11 +10019,15 @@ i845_cursor_max_stride(struct intel_plane *plane,
        return 2048;
 }
 
+static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+       return CURSOR_GAMMA_ENABLE;
+}
+
 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
                           const struct intel_plane_state *plane_state)
 {
        return CURSOR_ENABLE |
-               CURSOR_GAMMA_ENABLE |
                CURSOR_FORMAT_ARGB |
                CURSOR_STRIDE(plane_state->color_plane[0].stride);
 }
@@ -9922,7 +10097,9 @@ static void i845_update_cursor(struct intel_plane *plane,
                unsigned int width = plane_state->base.crtc_w;
                unsigned int height = plane_state->base.crtc_h;
 
-               cntl = plane_state->ctl;
+               cntl = plane_state->ctl |
+                       i845_cursor_ctl_crtc(crtc_state);
+
                size = (height << 12) | width;
 
                base = intel_cursor_base(plane_state);
@@ -9964,17 +10141,19 @@ static bool i845_cursor_get_hw_state(struct intel_plane *plane,
 {
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum intel_display_power_domain power_domain;
+       intel_wakeref_t wakeref;
        bool ret;
 
        power_domain = POWER_DOMAIN_PIPE(PIPE_A);
-       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+       if (!wakeref)
                return false;
 
        ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
 
        *pipe = PIPE_A;
 
-       intel_display_power_put(dev_priv, power_domain);
+       intel_display_power_put(dev_priv, power_domain, wakeref);
 
        return ret;
 }
@@ -9987,27 +10166,36 @@ i9xx_cursor_max_stride(struct intel_plane *plane,
        return plane->base.dev->mode_config.cursor_width * 4;
 }
 
-static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
-                          const struct intel_plane_state *plane_state)
+static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
 {
-       struct drm_i915_private *dev_priv =
-               to_i915(plane_state->base.plane->dev);
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        u32 cntl = 0;
 
-       if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
-               cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
+       if (INTEL_GEN(dev_priv) >= 11)
+               return cntl;
 
-       if (INTEL_GEN(dev_priv) <= 10) {
-               cntl |= MCURSOR_GAMMA_ENABLE;
+       cntl |= MCURSOR_GAMMA_ENABLE;
 
-               if (HAS_DDI(dev_priv))
-                       cntl |= MCURSOR_PIPE_CSC_ENABLE;
-       }
+       if (HAS_DDI(dev_priv))
+               cntl |= MCURSOR_PIPE_CSC_ENABLE;
 
        if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
                cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
 
+       return cntl;
+}
+
+static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
+                          const struct intel_plane_state *plane_state)
+{
+       struct drm_i915_private *dev_priv =
+               to_i915(plane_state->base.plane->dev);
+       u32 cntl = 0;
+
+       if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
+               cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
+
        switch (plane_state->base.crtc_w) {
        case 64:
                cntl |= MCURSOR_MODE_64_ARGB_AX;
@@ -10132,7 +10320,8 @@ static void i9xx_update_cursor(struct intel_plane *plane,
        unsigned long irqflags;
 
        if (plane_state && plane_state->base.visible) {
-               cntl = plane_state->ctl;
+               cntl = plane_state->ctl |
+                       i9xx_cursor_ctl_crtc(crtc_state);
 
                if (plane_state->base.crtc_h != plane_state->base.crtc_w)
                        fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);
@@ -10197,6 +10386,7 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
 {
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum intel_display_power_domain power_domain;
+       intel_wakeref_t wakeref;
        bool ret;
        u32 val;
 
@@ -10206,7 +10396,8 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
         * display power wells.
         */
        power_domain = POWER_DOMAIN_PIPE(plane->pipe);
-       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+       if (!wakeref)
                return false;
 
        val = I915_READ(CURCNTR(plane->pipe));
@@ -10219,7 +10410,7 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
                *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
                        MCURSOR_PIPE_SELECT_SHIFT;
 
-       intel_display_power_put(dev_priv, power_domain);
+       intel_display_power_put(dev_priv, power_domain, wakeref);
 
        return ret;
 }
@@ -10468,7 +10659,7 @@ static int i9xx_pll_refclk(struct drm_device *dev,
                return dev_priv->vbt.lvds_ssc_freq;
        else if (HAS_PCH_SPLIT(dev_priv))
                return 120000;
-       else if (!IS_GEN2(dev_priv))
+       else if (!IS_GEN(dev_priv, 2))
                return 96000;
        else
                return 48000;
@@ -10501,7 +10692,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
                clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
        }
 
-       if (!IS_GEN2(dev_priv)) {
+       if (!IS_GEN(dev_priv, 2)) {
                if (IS_PINEVIEW(dev_priv))
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
                                DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
@@ -10653,20 +10844,17 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
 
 /**
  * intel_wm_need_update - Check whether watermarks need updating
- * @plane: drm plane
- * @state: new plane state
+ * @cur: current plane state
+ * @new: new plane state
  *
  * Check current plane state versus the new one to determine whether
  * watermarks need to be recalculated.
  *
  * Returns true or false.
  */
-static bool intel_wm_need_update(struct drm_plane *plane,
-                                struct drm_plane_state *state)
+static bool intel_wm_need_update(struct intel_plane_state *cur,
+                                struct intel_plane_state *new)
 {
-       struct intel_plane_state *new = to_intel_plane_state(state);
-       struct intel_plane_state *cur = to_intel_plane_state(plane->state);
-
        /* Update watermarks on tiling or size changes. */
        if (new->base.visible != cur->base.visible)
                return true;
@@ -10775,7 +10963,8 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
                /* must disable cxsr around plane enable/disable */
                if (plane->id != PLANE_CURSOR)
                        pipe_config->disable_cxsr = true;
-       } else if (intel_wm_need_update(&plane->base, plane_state)) {
+       } else if (intel_wm_need_update(to_intel_plane_state(plane->base.state),
+                                       to_intel_plane_state(plane_state))) {
                if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
                        /* FIXME bollocks */
                        pipe_config->update_wm_pre = true;
@@ -10815,9 +11004,12 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
         * Despite the w/a only being listed for IVB we assume that
         * the ILK/SNB note has similar ramifications, hence we apply
         * the w/a on all three platforms.
+        *
+        * Experimental results suggest this is needed also for the primary
+        * plane, not only the sprite plane.
         */
-       if (plane->id == PLANE_SPRITE0 &&
-           (IS_GEN5(dev_priv) || IS_GEN6(dev_priv) ||
+       if (plane->id != PLANE_CURSOR &&
+           (IS_GEN_RANGE(dev_priv, 5, 6) ||
             IS_IVYBRIDGE(dev_priv)) &&
            (turn_on || (!needs_scaling(old_plane_state) &&
                         needs_scaling(to_intel_plane_state(plane_state)))))
@@ -10954,15 +11146,15 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
 static int intel_crtc_atomic_check(struct drm_crtc *crtc,
                                   struct drm_crtc_state *crtc_state)
 {
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *pipe_config =
                to_intel_crtc_state(crtc_state);
        int ret;
        bool mode_changed = needs_modeset(crtc_state);
 
-       if (mode_changed && !crtc_state->active)
+       if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
+           mode_changed && !crtc_state->active)
                pipe_config->update_wm_post = true;
 
        if (mode_changed && crtc_state->enable &&
@@ -10974,8 +11166,8 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
                        return ret;
        }
 
-       if (crtc_state->color_mgmt_changed) {
-               ret = intel_color_check(crtc, crtc_state);
+       if (mode_changed || crtc_state->color_mgmt_changed) {
+               ret = intel_color_check(pipe_config);
                if (ret)
                        return ret;
 
@@ -11004,9 +11196,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
                 * old state and the new state.  We can program these
                 * immediately.
                 */
-               ret = dev_priv->display.compute_intermediate_wm(dev,
-                                                               intel_crtc,
-                                                               pipe_config);
+               ret = dev_priv->display.compute_intermediate_wm(pipe_config);
                if (ret) {
                        DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
                        return ret;
@@ -11014,7 +11204,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
        }
 
        if (INTEL_GEN(dev_priv) >= 9) {
-               if (mode_changed)
+               if (mode_changed || pipe_config->update_pipe)
                        ret = skl_update_scaler_crtc(pipe_config);
 
                if (!ret)
@@ -11275,7 +11465,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
                              pipe_config->scaler_state.scaler_users,
                              pipe_config->scaler_state.scaler_id);
 
-       if (HAS_GMCH_DISPLAY(dev_priv))
+       if (HAS_GMCH(dev_priv))
                DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
                              pipe_config->gmch_pfit.control,
                              pipe_config->gmch_pfit.pgm_ratios,
@@ -11387,44 +11577,38 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
        return ret;
 }
 
-static void
+static int
 clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
 {
        struct drm_i915_private *dev_priv =
                to_i915(crtc_state->base.crtc->dev);
-       struct intel_crtc_scaler_state scaler_state;
-       struct intel_dpll_hw_state dpll_hw_state;
-       struct intel_shared_dpll *shared_dpll;
-       struct intel_crtc_wm_state wm_state;
-       bool force_thru, ips_force_disable;
+       struct intel_crtc_state *saved_state;
+
+       saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL);
+       if (!saved_state)
+               return -ENOMEM;
 
        /* FIXME: before the switch to atomic started, a new pipe_config was
         * kzalloc'd. Code that depends on any field being zero should be
         * fixed, so that the crtc_state can be safely duplicated. For now,
         * only fields that are known to not cause problems are preserved. */
 
-       scaler_state = crtc_state->scaler_state;
-       shared_dpll = crtc_state->shared_dpll;
-       dpll_hw_state = crtc_state->dpll_hw_state;
-       force_thru = crtc_state->pch_pfit.force_thru;
-       ips_force_disable = crtc_state->ips_force_disable;
+       saved_state->scaler_state = crtc_state->scaler_state;
+       saved_state->shared_dpll = crtc_state->shared_dpll;
+       saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
+       saved_state->pch_pfit.force_thru = crtc_state->pch_pfit.force_thru;
+       saved_state->ips_force_disable = crtc_state->ips_force_disable;
        if (IS_G4X(dev_priv) ||
            IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               wm_state = crtc_state->wm;
+               saved_state->wm = crtc_state->wm;
 
        /* Keep base drm_crtc_state intact, only clear our extended struct */
        BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
-       memset(&crtc_state->base + 1, 0,
+       memcpy(&crtc_state->base + 1, &saved_state->base + 1,
               sizeof(*crtc_state) - sizeof(crtc_state->base));
 
-       crtc_state->scaler_state = scaler_state;
-       crtc_state->shared_dpll = shared_dpll;
-       crtc_state->dpll_hw_state = dpll_hw_state;
-       crtc_state->pch_pfit.force_thru = force_thru;
-       crtc_state->ips_force_disable = ips_force_disable;
-       if (IS_G4X(dev_priv) ||
-           IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               crtc_state->wm = wm_state;
+       kfree(saved_state);
+       return 0;
 }
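
The rewritten clear_intel_crtc_state() allocates a zeroed snapshot, copies the fields that must survive into it, then memcpy()s everything past the embedded base back over the live state: one copy replaces the old save-to-locals/restore dance, and the function can now report allocation failure. A toy sketch of the pattern (struct and field names invented):

#include <stdlib.h>
#include <string.h>

struct base_state { int id; };

struct ext_state {
        struct base_state base; /* must stay the first member */
        int scaler;             /* preserved across the clear */
        int scratch;            /* wiped by the clear */
};

static int clear_ext_state(struct ext_state *s)
{
        struct ext_state *saved = calloc(1, sizeof(*saved));

        if (!saved)
                return -1;      /* -ENOMEM in the kernel version */

        /* Copy only the survivors into the zeroed snapshot... */
        saved->scaler = s->scaler;

        /* ...then overwrite everything past the embedded base in one go. */
        memcpy(&s->base + 1, &saved->base + 1,
               sizeof(*s) - sizeof(s->base));

        free(saved);
        return 0;
}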
 
 static int
@@ -11439,7 +11623,9 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
        int i;
        bool retry = true;
 
-       clear_intel_crtc_state(pipe_config);
+       ret = clear_intel_crtc_state(pipe_config);
+       if (ret)
+               return ret;
 
        pipe_config->cpu_transcoder =
                (enum transcoder) to_intel_crtc(crtc)->pipe;
@@ -11517,10 +11703,13 @@ encoder_retry:
                        continue;
 
                encoder = to_intel_encoder(connector_state->best_encoder);
-
-               if (!(encoder->compute_config(encoder, pipe_config, connector_state))) {
-                       DRM_DEBUG_KMS("Encoder config failure\n");
-                       return -EINVAL;
+               ret = encoder->compute_config(encoder, pipe_config,
+                                             connector_state);
+               if (ret < 0) {
+                       if (ret != -EDEADLK)
+                               DRM_DEBUG_KMS("Encoder config failure: %d\n",
+                                             ret);
+                       return ret;
                }
        }
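
The encoder ->compute_config() hook changes from bool to int so a locking conflict can propagate: -EDEADLK must reach the atomic core, which drops locks and retries the whole check, and only genuinely fatal errors deserve a debug message. A sketch of that propagation rule (hooks are stand-ins, error values from errno.h):

#include <errno.h>
#include <stdio.h>

/* Stand-in hook: 0 on success, negative errno on failure. -EDEADLK
 * means "back off and retry", not "the configuration is invalid". */
static int encoder_compute_config(int want_deadlock)
{
        return want_deadlock ? -EDEADLK : 0;
}

static int check_encoders(int want_deadlock)
{
        int ret = encoder_compute_config(want_deadlock);

        if (ret < 0) {
                if (ret != -EDEADLK)    /* only log real failures */
                        fprintf(stderr, "encoder config failure: %d\n", ret);
                return ret;     /* hand -EDEADLK up for lock backoff */
        }
        return 0;
}

int main(void)
{
        return check_encoders(0);
}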
 
@@ -11645,6 +11834,23 @@ pipe_config_err(bool adjust, const char *name, const char *format, ...)
        va_end(args);
 }
 
+static bool fastboot_enabled(struct drm_i915_private *dev_priv)
+{
+       if (i915_modparams.fastboot != -1)
+               return i915_modparams.fastboot;
+
+       /* Enable fastboot by default on Skylake and newer */
+       if (INTEL_GEN(dev_priv) >= 9)
+               return true;
+
+       /* Enable fastboot by default on VLV and CHV */
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               return true;
+
+       /* Disabled by default on all others */
+       return false;
+}
+
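fastboot_enabled() turns the module parameter into a tri-state: -1 selects the per-platform default (on for gen9+ and VLV/CHV, off elsewhere), while 0 or 1 force it, and the intel_atomic_check() hunk below drops the direct i915_modparams.fastboot test accordingly. A sketch of the tri-state resolution (platform predicate is a stand-in):

#include <stdbool.h>

static int fastboot_param = -1; /* -1 = auto, 0 = off, 1 = on */

static bool fastboot_default(int gen, bool is_vlv_or_chv)
{
        return gen >= 9 || is_vlv_or_chv;       /* mirrors the defaults above */
}

static bool fastboot_enabled_sketch(int gen, bool is_vlv_or_chv)
{
        if (fastboot_param != -1)               /* user forced a value */
                return fastboot_param != 0;
        return fastboot_default(gen, is_vlv_or_chv);
}
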
 static bool
 intel_pipe_config_compare(struct drm_i915_private *dev_priv,
                          struct intel_crtc_state *current_config,
@@ -11656,6 +11862,11 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
                (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
                !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
 
+       if (fixup_inherited && !fastboot_enabled(dev_priv)) {
+               DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
+               ret = false;
+       }
+
 #define PIPE_CONF_CHECK_X(name) do { \
        if (current_config->name != pipe_config->name) { \
                pipe_config_err(adjust, __stringify(name), \
@@ -11964,7 +12175,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
        if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
                return;
 
-       skl_pipe_wm_get_hw_state(crtc, &hw_wm);
+       skl_pipe_wm_get_hw_state(intel_crtc, &hw_wm);
        sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
 
        skl_pipe_ddb_get_hw_state(intel_crtc, hw_ddb_y, hw_ddb_uv);
@@ -12378,7 +12589,7 @@ static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
         * However if queried just before the start of vblank we'll get an
         * answer that's slightly in the future.
         */
-       if (IS_GEN2(dev_priv)) {
+       if (IS_GEN(dev_priv, 2)) {
                const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
                int vtotal;
 
@@ -12619,9 +12830,9 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
  * phase.  The code here should be run after the per-crtc and per-plane 'check'
  * handlers to ensure that all derived state has been updated.
  */
-static int calc_watermark_data(struct drm_atomic_state *state)
+static int calc_watermark_data(struct intel_atomic_state *state)
 {
-       struct drm_device *dev = state->dev;
+       struct drm_device *dev = state->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
 
        /* Is there platform-specific watermark information to calculate? */
@@ -12679,8 +12890,7 @@ static int intel_atomic_check(struct drm_device *dev,
                        return ret;
                }
 
-               if (i915_modparams.fastboot &&
-                   intel_pipe_config_compare(dev_priv,
+               if (intel_pipe_config_compare(dev_priv,
                                        to_intel_crtc_state(old_crtc_state),
                                        pipe_config, true)) {
                        crtc_state->mode_changed = false;
@@ -12695,6 +12905,10 @@ static int intel_atomic_check(struct drm_device *dev,
                                       "[modeset]" : "[fastset]");
        }
 
+       ret = drm_dp_mst_atomic_check(state);
+       if (ret)
+               return ret;
+
        if (any_ms) {
                ret = intel_modeset_checks(state);
 
@@ -12713,7 +12927,7 @@ static int intel_atomic_check(struct drm_device *dev,
                return ret;
 
        intel_fbc_choose_crtc(dev_priv, intel_state);
-       return calc_watermark_data(state);
+       return calc_watermark_data(intel_state);
 }
 
 static int intel_atomic_prepare_commit(struct drm_device *dev,
@@ -12725,8 +12939,9 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
+       struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
 
-       if (!dev->max_vblank_count)
+       if (!vblank->max_vblank_count)
                return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
 
        return dev->driver->get_vblank_counter(dev, crtc->pipe);
@@ -12755,9 +12970,14 @@ static void intel_update_crtc(struct drm_crtc *crtc,
        } else {
                intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
                                       pipe_config);
+
+               if (pipe_config->update_pipe)
+                       intel_encoders_update_pipe(crtc, pipe_config, state);
        }
 
-       if (new_plane_state)
+       if (pipe_config->update_pipe && !pipe_config->enable_fbc)
+               intel_fbc_disable(intel_crtc);
+       else if (new_plane_state)
                intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);
 
        intel_begin_crtc_commit(crtc, old_crtc_state);
@@ -12930,6 +13150,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
        struct drm_crtc *crtc;
        struct intel_crtc *intel_crtc;
        u64 put_domains[I915_MAX_PIPES] = {};
+       intel_wakeref_t wakeref = 0;
        int i;
 
        intel_atomic_commit_fence_wait(intel_state);
@@ -12937,7 +13158,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
        drm_atomic_helper_wait_for_dependencies(state);
 
        if (intel_state->modeset)
-               intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
+               wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
 
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
@@ -12980,7 +13201,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 
                        /* FIXME unify this for all platforms */
                        if (!new_crtc_state->active &&
-                           !HAS_GMCH_DISPLAY(dev_priv) &&
+                           !HAS_GMCH(dev_priv) &&
                            dev_priv->display.initial_watermarks)
                                dev_priv->display.initial_watermarks(intel_state,
                                                                     new_intel_crtc_state);
@@ -13034,6 +13255,16 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
         */
        drm_atomic_helper_wait_for_flip_done(dev, state);
 
+       for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+               new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
+
+               if (new_crtc_state->active &&
+                   !needs_modeset(new_crtc_state) &&
+                   (new_intel_crtc_state->base.color_mgmt_changed ||
+                    new_intel_crtc_state->update_pipe))
+                       intel_color_load_luts(new_intel_crtc_state);
+       }
+
        /*
         * Now that the vblank has passed, we can go ahead and program the
         * optimal watermarks on platforms that need two-step watermark
@@ -13074,7 +13305,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
                 * the culprit.
                 */
                intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
-               intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
+               intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
        }
 
        /*
@@ -13549,19 +13780,16 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
                intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
        bool modeset = needs_modeset(&intel_cstate->base);
 
-       if (!modeset &&
-           (intel_cstate->base.color_mgmt_changed ||
-            intel_cstate->update_pipe)) {
-               intel_color_set_csc(&intel_cstate->base);
-               intel_color_load_luts(&intel_cstate->base);
-       }
-
        /* Perform vblank evasion around commit operation */
        intel_pipe_update_start(intel_cstate);
 
        if (modeset)
                goto out;
 
+       if (intel_cstate->base.color_mgmt_changed ||
+           intel_cstate->update_pipe)
+               intel_color_commit(intel_cstate);
+
        if (intel_cstate->update_pipe)
                intel_update_pipe_config(old_intel_cstate, intel_cstate);
        else if (INTEL_GEN(dev_priv) >= 9)
@@ -13578,7 +13806,7 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
 {
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
-       if (!IS_GEN2(dev_priv))
+       if (!IS_GEN(dev_priv, 2))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
 
        if (crtc_state->has_pch_encoder) {
@@ -13702,8 +13930,8 @@ intel_legacy_cursor_update(struct drm_plane *plane,
                           struct drm_framebuffer *fb,
                           int crtc_x, int crtc_y,
                           unsigned int crtc_w, unsigned int crtc_h,
-                          uint32_t src_x, uint32_t src_y,
-                          uint32_t src_w, uint32_t src_h,
+                          u32 src_x, u32 src_y,
+                          u32 src_w, u32 src_h,
                           struct drm_modeset_acquire_ctx *ctx)
 {
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
@@ -14040,7 +14268,7 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc,
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        int i;
 
-       crtc->num_scalers = dev_priv->info.num_scalers[crtc->pipe];
+       crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
        if (!crtc->num_scalers)
                return;
 
@@ -14126,7 +14354,7 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
 
        drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
 
-       intel_color_init(&intel_crtc->base);
+       intel_color_init(intel_crtc);
 
        WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
 
@@ -14177,7 +14405,7 @@ static int intel_encoder_clones(struct intel_encoder *encoder)
        return index_mask;
 }
 
-static bool has_edp_a(struct drm_i915_private *dev_priv)
+static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
 {
        if (!IS_MOBILE(dev_priv))
                return false;
@@ -14185,13 +14413,13 @@ static bool has_edp_a(struct drm_i915_private *dev_priv)
        if ((I915_READ(DP_A) & DP_DETECTED) == 0)
                return false;
 
-       if (IS_GEN5(dev_priv) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
+       if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
                return false;
 
        return true;
 }
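
The IS_GENx() family gives way to IS_GEN(dev_priv, n) with the generation as an argument, and consecutive checks collapse into IS_GEN_RANGE() (see the gen3/gen4 output-setup branch further down). The shape of the conversion, with hypothetical helper names:

	if (IS_GEN(dev_priv, 5))		/* was IS_GEN5(dev_priv) */
		setup_gen5(dev_priv);
	else if (IS_GEN_RANGE(dev_priv, 3, 4))	/* was IS_GEN3() || IS_GEN4() */
		setup_gen3_gen4(dev_priv);
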
 
-static bool intel_crt_present(struct drm_i915_private *dev_priv)
+static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
 {
        if (INTEL_GEN(dev_priv) >= 9)
                return false;
@@ -14199,15 +14427,12 @@ static bool intel_crt_present(struct drm_i915_private *dev_priv)
        if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
                return false;
 
-       if (IS_CHERRYVIEW(dev_priv))
-               return false;
-
        if (HAS_PCH_LPT_H(dev_priv) &&
            I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
                return false;
 
        /* DDI E can't be used if DDI A requires 4 lanes */
-       if (HAS_DDI(dev_priv) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
+       if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
                return false;
 
        if (!dev_priv->vbt.int_crt_support)
@@ -14262,23 +14487,21 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
        if (!HAS_DISPLAY(dev_priv))
                return;
 
-       /*
-        * intel_edp_init_connector() depends on this completing first, to
-        * prevent the registeration of both eDP and LVDS and the incorrect
-        * sharing of the PPS.
-        */
-       intel_lvds_init(dev_priv);
-
-       if (intel_crt_present(dev_priv))
-               intel_crt_init(dev_priv);
-
        if (IS_ICELAKE(dev_priv)) {
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_C);
                intel_ddi_init(dev_priv, PORT_D);
                intel_ddi_init(dev_priv, PORT_E);
-               intel_ddi_init(dev_priv, PORT_F);
+               /*
+                * On some ICL SKUs port F is not present. No strap bits for
+                * this, so rely on VBT.
+                * Work around broken VBTs on SKUs known to have no port F.
+                */
+               if (IS_ICL_WITH_PORT_F(dev_priv) &&
+                   intel_bios_is_port_present(dev_priv, PORT_F))
+                       intel_ddi_init(dev_priv, PORT_F);
+
                icl_dsi_init(dev_priv);
        } else if (IS_GEN9_LP(dev_priv)) {
                /*
@@ -14294,6 +14517,9 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
        } else if (HAS_DDI(dev_priv)) {
                int found;
 
+               if (intel_ddi_crt_present(dev_priv))
+                       intel_crt_init(dev_priv);
+
                /*
                 * Haswell uses DDI functions to detect digital outputs.
                 * On SKL pre-D0 the strap isn't connected, so we assume
@@ -14320,16 +14546,23 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
                 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
                 */
                if (IS_GEN9_BC(dev_priv) &&
-                   (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
-                    dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
-                    dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
+                   intel_bios_is_port_present(dev_priv, PORT_E))
                        intel_ddi_init(dev_priv, PORT_E);
 
        } else if (HAS_PCH_SPLIT(dev_priv)) {
                int found;
+
+               /*
+                * intel_edp_init_connector() depends on this completing first,
+                * to prevent the registration of both eDP and LVDS and the
+                * incorrect sharing of the PPS.
+                */
+               intel_lvds_init(dev_priv);
+               intel_crt_init(dev_priv);
+
                dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
 
-               if (has_edp_a(dev_priv))
+               if (ilk_has_edp_a(dev_priv))
                        intel_dp_init(dev_priv, DP_A, PORT_A);
 
                if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
@@ -14355,6 +14588,9 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                bool has_edp, has_port;
 
+               if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
+                       intel_crt_init(dev_priv);
+
                /*
                 * The DP_DETECTED bit is the latched state of the DDC
                 * SDA pin at boot. However since eDP doesn't require DDC
@@ -14397,9 +14633,17 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
                }
 
                vlv_dsi_init(dev_priv);
-       } else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) {
+       } else if (IS_PINEVIEW(dev_priv)) {
+               intel_lvds_init(dev_priv);
+               intel_crt_init(dev_priv);
+       } else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
                bool found = false;
 
+               if (IS_MOBILE(dev_priv))
+                       intel_lvds_init(dev_priv);
+
+               intel_crt_init(dev_priv);
+
                if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
                        DRM_DEBUG_KMS("probing SDVOB\n");
                        found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
@@ -14431,11 +14675,16 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
 
                if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
                        intel_dp_init(dev_priv, DP_D, PORT_D);
-       } else if (IS_GEN2(dev_priv))
-               intel_dvo_init(dev_priv);
 
-       if (SUPPORTS_TV(dev_priv))
-               intel_tv_init(dev_priv);
+               if (SUPPORTS_TV(dev_priv))
+                       intel_tv_init(dev_priv);
+       } else if (IS_GEN(dev_priv, 2)) {
+               if (IS_I85X(dev_priv))
+                       intel_lvds_init(dev_priv);
+
+               intel_crt_init(dev_priv);
+               intel_dvo_init(dev_priv);
+       }
 
        intel_psr_init(dev_priv);
 
@@ -14602,14 +14851,6 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 
        drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
 
-       if (fb->format->format == DRM_FORMAT_NV12 &&
-           (fb->width < SKL_MIN_YUV_420_SRC_W ||
-            fb->height < SKL_MIN_YUV_420_SRC_H ||
-            (fb->width % 4) != 0 || (fb->height % 4) != 0)) {
-               DRM_DEBUG_KMS("src dimensions not correct for NV12\n");
-               goto err;
-       }
-
        for (i = 0; i < fb->format->num_planes; i++) {
                u32 stride_alignment;
 
@@ -14629,7 +14870,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
                 * require the entire fb to accommodate that to avoid
                 * potential runtime errors at plane configuration time.
                 */
-               if (IS_GEN9(dev_priv) && i == 0 && fb->width > 3840 &&
+               if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
                    is_ccs_modifier(fb->modifier))
                        stride_alignment *= 4;
 
@@ -14834,7 +15075,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
                dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
                dev_priv->display.crtc_enable = i9xx_crtc_enable;
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
-       } else if (!IS_GEN2(dev_priv)) {
+       } else if (!IS_GEN(dev_priv, 2)) {
                dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
                dev_priv->display.get_initial_plane_config =
                        i9xx_get_initial_plane_config;
@@ -14850,9 +15091,9 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
        }
 
-       if (IS_GEN5(dev_priv)) {
+       if (IS_GEN(dev_priv, 5)) {
                dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
-       } else if (IS_GEN6(dev_priv)) {
+       } else if (IS_GEN(dev_priv, 6)) {
                dev_priv->display.fdi_link_train = gen6_fdi_link_train;
        } else if (IS_IVYBRIDGE(dev_priv)) {
                /* FIXME: detect B0+ stepping and use auto training */
@@ -14945,7 +15186,7 @@ retry:
         * intermediate watermarks (since we don't trust the current
         * watermarks).
         */
-       if (!HAS_GMCH_DISPLAY(dev_priv))
+       if (!HAS_GMCH(dev_priv))
                intel_state->skip_intermediate_wm = true;
 
        ret = intel_atomic_check(dev, state);
@@ -14984,12 +15225,12 @@ fail:
 
 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
 {
-       if (IS_GEN5(dev_priv)) {
+       if (IS_GEN(dev_priv, 5)) {
                u32 fdi_pll_clk =
                        I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
 
                dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
-       } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
+       } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
                dev_priv->fdi_pll_freq = 270000;
        } else {
                return;
@@ -15105,10 +15346,10 @@ int intel_modeset_init(struct drm_device *dev)
        }
 
        /* maximum framebuffer dimensions */
-       if (IS_GEN2(dev_priv)) {
+       if (IS_GEN(dev_priv, 2)) {
                dev->mode_config.max_width = 2048;
                dev->mode_config.max_height = 2048;
-       } else if (IS_GEN3(dev_priv)) {
+       } else if (IS_GEN(dev_priv, 3)) {
                dev->mode_config.max_width = 4096;
                dev->mode_config.max_height = 4096;
        } else {
@@ -15119,7 +15360,7 @@ int intel_modeset_init(struct drm_device *dev)
        if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
                dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
                dev->mode_config.cursor_height = 1023;
-       } else if (IS_GEN2(dev_priv)) {
+       } else if (IS_GEN(dev_priv, 2)) {
                dev->mode_config.cursor_width = 64;
                dev->mode_config.cursor_height = 64;
        } else {
@@ -15186,7 +15427,7 @@ int intel_modeset_init(struct drm_device *dev)
         * Note that we need to do this after reconstructing the BIOS fb's
         * since the watermark calculation done here will use pstate->fb.
         */
-       if (!HAS_GMCH_DISPLAY(dev_priv))
+       if (!HAS_GMCH(dev_priv))
                sanitize_watermarks(dev);
 
        /*
@@ -15379,6 +15620,15 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
                            plane->base.type != DRM_PLANE_TYPE_PRIMARY)
                                intel_plane_disable_noatomic(crtc, plane);
                }
+
+               /*
+                * Disable any background color set by the BIOS, but enable the
+                * gamma and CSC to match how we program our planes.
+                */
+               if (INTEL_GEN(dev_priv) >= 9)
+                       I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
+                                  SKL_BOTTOM_COLOR_GAMMA_ENABLE |
+                                  SKL_BOTTOM_COLOR_CSC_ENABLE);
        }
 
        /* Adjust the state of the output pipe according to whether we
@@ -15386,7 +15636,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
        if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
                intel_crtc_disable_noatomic(&crtc->base, ctx);
 
-       if (crtc_state->base.active || HAS_GMCH_DISPLAY(dev_priv)) {
+       if (crtc_state->base.active || HAS_GMCH(dev_priv)) {
                /*
                 * We start out with underrun reporting disabled to avoid races.
                 * For correct bookkeeping mark this on active crtcs.
@@ -15415,16 +15665,45 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
        }
 }
 
+static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+       /*
+        * Some SNB BIOSen (e.g. ASUS K53SV) are known to misprogram
+        * the hardware when a high res display is plugged in. The DPLL P
+        * divider is zero, and the pipe timings are bonkers. We'll
+        * try to disable everything in that case.
+        *
+        * FIXME would be nice to be able to sanitize this state
+        * without several WARNs, but for now let's take the easy
+        * road.
+        */
+       return IS_GEN(dev_priv, 6) &&
+               crtc_state->base.active &&
+               crtc_state->shared_dpll &&
+               crtc_state->port_clock == 0;
+}
+
 static void intel_sanitize_encoder(struct intel_encoder *encoder)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_connector *connector;
+       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+       struct intel_crtc_state *crtc_state = crtc ?
+               to_intel_crtc_state(crtc->base.state) : NULL;
 
        /* We need to check both for a crtc link (meaning that the
         * encoder is active and trying to read from a pipe) and the
         * pipe itself being active. */
-       bool has_active_crtc = encoder->base.crtc &&
-               to_intel_crtc(encoder->base.crtc)->active;
+       bool has_active_crtc = crtc_state &&
+               crtc_state->base.active;
+
+       if (crtc_state && has_bogus_dpll_config(crtc_state)) {
+               DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
+                             pipe_name(crtc->pipe));
+               has_active_crtc = false;
+       }
 
        connector = intel_encoder_find_connector(encoder);
        if (connector && !has_active_crtc) {
@@ -15435,16 +15714,25 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
                /* Connector is active, but has no active pipe. This is
                 * fallout from our resume register restoring. Disable
                 * the encoder manually again. */
-               if (encoder->base.crtc) {
-                       struct drm_crtc_state *crtc_state = encoder->base.crtc->state;
+               if (crtc_state) {
+                       struct drm_encoder *best_encoder;
 
                        DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
                                      encoder->base.base.id,
                                      encoder->base.name);
+
+                       /* avoid oopsing in case the hooks consult best_encoder */
+                       best_encoder = connector->base.state->best_encoder;
+                       connector->base.state->best_encoder = &encoder->base;
+
                        if (encoder->disable)
-                               encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
+                               encoder->disable(encoder, crtc_state,
+                                                connector->base.state);
                        if (encoder->post_disable)
-                               encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
+                               encoder->post_disable(encoder, crtc_state,
+                                                     connector->base.state);
+
+                       connector->base.state->best_encoder = best_encoder;
                }
                encoder->base.crtc = NULL;
 
@@ -15476,19 +15764,25 @@ void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
 
 void i915_redisable_vga(struct drm_i915_private *dev_priv)
 {
-       /* This function can be called both from intel_modeset_setup_hw_state or
+       intel_wakeref_t wakeref;
+
+       /*
+        * This function can be called both from intel_modeset_setup_hw_state or
         * at a very early point in our resume sequence, where the power well
         * structures are not yet restored. Since this function is at a very
         * paranoid "someone might have enabled VGA while we were not looking"
         * level, just check if the power well is enabled instead of trying to
         * follow the "don't touch the power well if we don't need it" policy
-        * the rest of the driver uses. */
-       if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
+        * the rest of the driver uses.
+        */
+       wakeref = intel_display_power_get_if_enabled(dev_priv,
+                                                    POWER_DOMAIN_VGA);
+       if (!wakeref)
                return;
 
        i915_redisable_vga_power_on(dev_priv);
 
-       intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
+       intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref);
 }
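
i915_redisable_vga() above shows the conditional variant of the wakeref pattern: intel_display_power_get_if_enabled() returns 0 when the domain is powered down, so the cookie doubles as the old boolean. Sketch of the pattern, using the names from this hunk:

	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA);
	if (!wakeref)
		return;	/* domain is off; no reference was taken */

	/* safe to poke VGA registers here */

	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref);
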
 
 /* FIXME read out full plane state for all planes */
@@ -15788,12 +16082,13 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
                             struct drm_modeset_acquire_ctx *ctx)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *crtc;
        struct intel_crtc_state *crtc_state;
        struct intel_encoder *encoder;
+       struct intel_crtc *crtc;
+       intel_wakeref_t wakeref;
        int i;
 
-       intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+       wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
 
        intel_early_display_was(dev_priv);
        intel_modeset_readout_hw_state(dev);
@@ -15809,10 +16104,12 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
         * waits, so we need vblank interrupts restored beforehand.
         */
        for_each_intel_crtc(&dev_priv->drm, crtc) {
+               crtc_state = to_intel_crtc_state(crtc->base.state);
+
                drm_crtc_vblank_reset(&crtc->base);
 
-               if (crtc->base.state->active)
-                       drm_crtc_vblank_on(&crtc->base);
+               if (crtc_state->base.active)
+                       intel_crtc_vblank_on(crtc_state);
        }
 
        intel_sanitize_plane_mapping(dev_priv);
@@ -15843,15 +16140,15 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
        }
 
        if (IS_G4X(dev_priv)) {
-               g4x_wm_get_hw_state(dev);
+               g4x_wm_get_hw_state(dev_priv);
                g4x_wm_sanitize(dev_priv);
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-               vlv_wm_get_hw_state(dev);
+               vlv_wm_get_hw_state(dev_priv);
                vlv_wm_sanitize(dev_priv);
        } else if (INTEL_GEN(dev_priv) >= 9) {
-               skl_wm_get_hw_state(dev);
+               skl_wm_get_hw_state(dev_priv);
        } else if (HAS_PCH_SPLIT(dev_priv)) {
-               ilk_wm_get_hw_state(dev);
+               ilk_wm_get_hw_state(dev_priv);
        }
 
        for_each_intel_crtc(dev, crtc) {
@@ -15863,7 +16160,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
                        modeset_put_power_domains(dev_priv, put_domains);
        }
 
-       intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+       intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
 
        intel_fbc_init_pipe_state(dev_priv);
 }
@@ -16086,7 +16383,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
 
                error->pipe[i].source = I915_READ(PIPESRC(i));
 
-               if (HAS_GMCH_DISPLAY(dev_priv))
+               if (HAS_GMCH(dev_priv))
                        error->pipe[i].stat = I915_READ(PIPESTAT(i));
        }
 
index 4262452963b31442363fe13ccf812c645a831fb7..c7c0686622884206a1fe89854527e7a6bced050e 100644 (file)
@@ -121,7 +121,7 @@ enum i9xx_plane_id {
 };
 
 #define plane_name(p) ((p) + 'A')
-#define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
+#define sprite_name(p, s) ((p) * RUNTIME_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
 
 /*
  * Per-pipe plane identifier.
@@ -311,12 +311,12 @@ struct intel_link_m_n {
 
 #define for_each_universal_plane(__dev_priv, __pipe, __p)              \
        for ((__p) = 0;                                                 \
-            (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
+            (__p) < RUNTIME_INFO(__dev_priv)->num_sprites[(__pipe)] + 1;       \
             (__p)++)
 
 #define for_each_sprite(__dev_priv, __p, __s)                          \
        for ((__s) = 0;                                                 \
-            (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)];        \
+            (__s) < RUNTIME_INFO(__dev_priv)->num_sprites[(__p)];      \
             (__s)++)
 
 #define for_each_port_masked(__port, __ports_mask) \
index fdd2cbc56fa335b6bf084c00451eca8a0ef957e3..cf709835fb9a9eece3c0761c21c53c34a25b7e22 100644 (file)
 #include <linux/notifier.h>
 #include <linux/reboot.h>
 #include <asm/byteorder.h>
-#include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_dp_helper.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_hdcp.h>
+#include <drm/drm_probe_helper.h>
 #include "intel_drv.h"
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
@@ -304,9 +303,11 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp)
 static int icl_max_source_rate(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        enum port port = dig_port->base.port;
 
-       if (port == PORT_B)
+       if (intel_port_is_combophy(dev_priv, port) &&
+           !intel_dp_is_edp(intel_dp))
                return 540000;
 
        return 810000;
@@ -344,7 +345,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
        if (INTEL_GEN(dev_priv) >= 10) {
                source_rates = cnl_rates;
                size = ARRAY_SIZE(cnl_rates);
-               if (IS_GEN10(dev_priv))
+               if (IS_GEN(dev_priv, 10))
                        max_rate = cnl_max_source_rate(intel_dp);
                else
                        max_rate = icl_max_source_rate(intel_dp);
@@ -428,7 +429,7 @@ static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
 }
 
 static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
-                                      uint8_t lane_count)
+                                      u8 lane_count)
 {
        /*
         * FIXME: we need to synchronize the current link parameters with
@@ -448,7 +449,7 @@ static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
 
 static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
                                                     int link_rate,
-                                                    uint8_t lane_count)
+                                                    u8 lane_count)
 {
        const struct drm_display_mode *fixed_mode =
                intel_dp->attached_connector->panel.fixed_mode;
@@ -463,7 +464,7 @@ static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
 }
 
 int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
-                                           int link_rate, uint8_t lane_count)
+                                           int link_rate, u8 lane_count)
 {
        int index;
 
@@ -571,19 +572,19 @@ intel_dp_mode_valid(struct drm_connector *connector,
        return MODE_OK;
 }
 
-uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
+u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
 {
-       int     i;
-       uint32_t v = 0;
+       int i;
+       u32 v = 0;
 
        if (src_bytes > 4)
                src_bytes = 4;
        for (i = 0; i < src_bytes; i++)
-               v |= ((uint32_t) src[i]) << ((3-i) * 8);
+               v |= ((u32)src[i]) << ((3 - i) * 8);
        return v;
 }
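
intel_dp_pack_aux() above packs up to four message bytes MSB-first into a single AUX channel data register word. A worked example with a hypothetical buffer:

	u8 buf[] = { 0x12, 0x34, 0x56 };
	u32 v = intel_dp_pack_aux(buf, ARRAY_SIZE(buf));

	/* v == 0x12345600: buf[0] lands in bits 31:24, absent bytes read as 0 */

intel_dp_unpack_aux() below performs the inverse for replies read back from the data registers.
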
 
-static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
+static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
 {
        int i;
        if (dst_bytes > 4)
@@ -600,30 +601,39 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
 static void
 intel_dp_pps_init(struct intel_dp *intel_dp);
 
-static void pps_lock(struct intel_dp *intel_dp)
+static intel_wakeref_t
+pps_lock(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       intel_wakeref_t wakeref;
 
        /*
         * See intel_power_sequencer_reset() for why we need
         * a power domain reference here.
         */
-       intel_display_power_get(dev_priv,
-                               intel_aux_power_domain(dp_to_dig_port(intel_dp)));
+       wakeref = intel_display_power_get(dev_priv,
+                                         intel_aux_power_domain(dp_to_dig_port(intel_dp)));
 
        mutex_lock(&dev_priv->pps_mutex);
+
+       return wakeref;
 }
 
-static void pps_unlock(struct intel_dp *intel_dp)
+static intel_wakeref_t
+pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
        mutex_unlock(&dev_priv->pps_mutex);
-
        intel_display_power_put(dev_priv,
-                               intel_aux_power_domain(dp_to_dig_port(intel_dp)));
+                               intel_aux_power_domain(dp_to_dig_port(intel_dp)),
+                               wakeref);
+       return 0;
 }
 
+#define with_pps_lock(dp, wf) \
+       for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))
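
with_pps_lock() packages the lock/unlock pair as a scoped block: the for statement runs its body exactly once, since pps_lock() returns a (non-zero) wakeref and the step expression pps_unlock() returns 0, terminating the loop. Typical use, matching the conversions in this patch:

	intel_wakeref_t wakeref;

	with_pps_lock(intel_dp, wakeref) {
		/* pps_mutex held and the AUX power domain awake in here */
		edp_panel_vdd_on(intel_dp);
	}

Note the body must fall off its end: a bare break or return would skip the step expression and leak the lock, so the conversions below always complete the block.
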
+
 static void
 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
 {
@@ -633,7 +643,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
        bool pll_enabled, release_cl_override = false;
        enum dpio_phy phy = DPIO_PHY(pipe);
        enum dpio_channel ch = vlv_pipe_to_channel(pipe);
-       uint32_t DP;
+       u32 DP;
 
        if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
                 "skipping pipe %c power sequencer kick due to port %c being active\n",
@@ -972,30 +982,29 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
        struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
                                                 edp_notifier);
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       intel_wakeref_t wakeref;
 
        if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
                return 0;
 
-       pps_lock(intel_dp);
-
-       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-               enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
-               i915_reg_t pp_ctrl_reg, pp_div_reg;
-               u32 pp_div;
-
-               pp_ctrl_reg = PP_CONTROL(pipe);
-               pp_div_reg  = PP_DIVISOR(pipe);
-               pp_div = I915_READ(pp_div_reg);
-               pp_div &= PP_REFERENCE_DIVIDER_MASK;
-
-               /* 0x1F write to PP_DIV_REG sets max cycle delay */
-               I915_WRITE(pp_div_reg, pp_div | 0x1F);
-               I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
-               msleep(intel_dp->panel_power_cycle_delay);
+       with_pps_lock(intel_dp, wakeref) {
+               if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+                       enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+                       i915_reg_t pp_ctrl_reg, pp_div_reg;
+                       u32 pp_div;
+
+                       pp_ctrl_reg = PP_CONTROL(pipe);
+                       pp_div_reg  = PP_DIVISOR(pipe);
+                       pp_div = I915_READ(pp_div_reg);
+                       pp_div &= PP_REFERENCE_DIVIDER_MASK;
+
+                       /* 0x1F write to PP_DIV_REG sets max cycle delay */
+                       I915_WRITE(pp_div_reg, pp_div | 0x1F);
+                       I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS);
+                       msleep(intel_dp->panel_power_cycle_delay);
+               }
        }
 
-       pps_unlock(intel_dp);
-
        return 0;
 }
 
@@ -1041,17 +1050,21 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
        }
 }
 
-static uint32_t
+static u32
 intel_dp_aux_wait_done(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
-       uint32_t status;
+       u32 status;
        bool done;
 
 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
        done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
                                  msecs_to_jiffies_timeout(10));
+
+       /* just trace the final value */
+       trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
+
        if (!done)
                DRM_ERROR("dp aux hw did not signal timeout!\n");
 #undef C
@@ -1059,7 +1072,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
        return status;
 }
 
-static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
@@ -1073,7 +1086,7 @@ static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
        return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
 }
 
-static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -1092,7 +1105,7 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
                return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
 }
 
-static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -1109,7 +1122,7 @@ static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
        return ilk_get_aux_clock_divider(intel_dp, index);
 }
 
-static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 {
        /*
         * SKL doesn't need us to program the AUX clock divider (Hardware will
@@ -1119,16 +1132,16 @@ static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
        return index ? 0 : 1;
 }
 
-static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
-                                    int send_bytes,
-                                    uint32_t aux_clock_divider)
+static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
+                               int send_bytes,
+                               u32 aux_clock_divider)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv =
                        to_i915(intel_dig_port->base.base.dev);
-       uint32_t precharge, timeout;
+       u32 precharge, timeout;
 
-       if (IS_GEN6(dev_priv))
+       if (IS_GEN(dev_priv, 6))
                precharge = 3;
        else
                precharge = 5;
@@ -1149,12 +1162,12 @@ static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
               (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
 }
 
-static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
-                                     int send_bytes,
-                                     uint32_t unused)
+static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
+                               int send_bytes,
+                               u32 unused)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       uint32_t ret;
+       u32 ret;
 
        ret = DP_AUX_CH_CTL_SEND_BUSY |
              DP_AUX_CH_CTL_DONE |
@@ -1174,25 +1187,26 @@ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
 
 static int
 intel_dp_aux_xfer(struct intel_dp *intel_dp,
-                 const uint8_t *send, int send_bytes,
-                 uint8_t *recv, int recv_size,
+                 const u8 *send, int send_bytes,
+                 u8 *recv, int recv_size,
                  u32 aux_send_ctl_flags)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv =
                        to_i915(intel_dig_port->base.base.dev);
        i915_reg_t ch_ctl, ch_data[5];
-       uint32_t aux_clock_divider;
+       u32 aux_clock_divider;
+       intel_wakeref_t wakeref;
        int i, ret, recv_bytes;
-       uint32_t status;
        int try, clock = 0;
+       u32 status;
        bool vdd;
 
        ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
        for (i = 0; i < ARRAY_SIZE(ch_data); i++)
                ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
 
-       pps_lock(intel_dp);
+       wakeref = pps_lock(intel_dp);
 
        /*
         * We will be called with VDD already enabled for dpcd/edid/oui reads.
@@ -1217,6 +1231,8 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
                        break;
                msleep(1);
        }
+       /* just trace the final value */
+       trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
 
        if (try == 3) {
                static u32 last_status = -1;
@@ -1336,7 +1352,7 @@ out:
        if (vdd)
                edp_panel_vdd_off(intel_dp, false);
 
-       pps_unlock(intel_dp);
+       pps_unlock(intel_dp, wakeref);
 
        return ret;
 }
@@ -1358,7 +1374,7 @@ static ssize_t
 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
 {
        struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
-       uint8_t txbuf[20], rxbuf[20];
+       u8 txbuf[20], rxbuf[20];
        size_t txsize, rxsize;
        int ret;
 
@@ -1691,7 +1707,7 @@ int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
 }
 
 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
-                          uint8_t *link_bw, uint8_t *rate_select)
+                          u8 *link_bw, u8 *rate_select)
 {
        /* eDP 1.4 rate select method. */
        if (intel_dp->use_rate_select) {
@@ -1808,7 +1824,7 @@ intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
 }
 
 /* Optimize link config in order: max bpp, min clock, min lanes */
-static bool
+static int
 intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
                                  struct intel_crtc_state *pipe_config,
                                  const struct link_config_limits *limits)
@@ -1834,17 +1850,17 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
                                        pipe_config->pipe_bpp = bpp;
                                        pipe_config->port_clock = link_clock;
 
-                                       return true;
+                                       return 0;
                                }
                        }
                }
        }
 
-       return false;
+       return -EINVAL;
 }
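
Both optimizers walk the same (bpp, link clock, lane count) space and return the first configuration whose available data rate covers the mode; only the loop nesting differs, which is what the "max bpp, min clock, min lanes" comments encode. A condensed sketch of the wide variant, where fits() stands in for the mode-rate comparison against intel_dp_max_data_rate():

	/* "wide": bpp descending, then link clock ascending, then lanes */
	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3)
		for (clock = limits->min_clock; clock <= limits->max_clock; clock++)
			for (lanes = limits->min_lane_count;
			     lanes <= limits->max_lane_count; lanes <<= 1)
				if (fits(bpp, clock, lanes))
					return 0;	/* first fit wins */

	return -EINVAL;

The fast variant below swaps the two inner loops, preferring fewer lanes over a lower link clock.
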
 
 /* Optimize link config in order: max bpp, min lanes, min clock */
-static bool
+static int
 intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
                                  struct intel_crtc_state *pipe_config,
                                  const struct link_config_limits *limits)
@@ -1870,13 +1886,13 @@ intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
                                        pipe_config->pipe_bpp = bpp;
                                        pipe_config->port_clock = link_clock;
 
-                                       return true;
+                                       return 0;
                                }
                        }
                }
        }
 
-       return false;
+       return -EINVAL;
 }
 
 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
@@ -1894,19 +1910,20 @@ static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
        return 0;
 }
 
-static bool intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
-                                       struct intel_crtc_state *pipe_config,
-                                       struct drm_connector_state *conn_state,
-                                       struct link_config_limits *limits)
+static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
+                                      struct intel_crtc_state *pipe_config,
+                                      struct drm_connector_state *conn_state,
+                                      struct link_config_limits *limits)
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
        u8 dsc_max_bpc;
        int pipe_bpp;
+       int ret;
 
        if (!intel_dp_supports_dsc(intel_dp, pipe_config))
-               return false;
+               return -EINVAL;
 
        dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC,
                            conn_state->max_requested_bpc);
@@ -1914,7 +1931,7 @@ static bool intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
        pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);
        if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) {
                DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
-               return false;
+               return -EINVAL;
        }
 
        /*
@@ -1948,7 +1965,7 @@ static bool intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
                                                     adjusted_mode->crtc_hdisplay);
                if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
                        DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
-                       return false;
+                       return -EINVAL;
                }
                pipe_config->dsc_params.compressed_bpp = min_t(u16,
                                                               dsc_max_output_bpp >> 4,
@@ -1965,16 +1982,19 @@ static bool intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
                        pipe_config->dsc_params.dsc_split = true;
                } else {
                        DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
-                       return false;
+                       return -EINVAL;
                }
        }
-       if (intel_dp_compute_dsc_params(intel_dp, pipe_config) < 0) {
+
+       ret = intel_dp_compute_dsc_params(intel_dp, pipe_config);
+       if (ret < 0) {
                DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
                              "Compressed BPP = %d\n",
                              pipe_config->pipe_bpp,
                              pipe_config->dsc_params.compressed_bpp);
-               return false;
+               return ret;
        }
+
        pipe_config->dsc_params.compression_enable = true;
        DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
                      "Compressed Bpp = %d Slice Count = %d\n",
@@ -1982,10 +2002,10 @@ static bool intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
                      pipe_config->dsc_params.compressed_bpp,
                      pipe_config->dsc_params.slice_count);
 
-       return true;
+       return 0;
 }
 
-static bool
+static int
 intel_dp_compute_link_config(struct intel_encoder *encoder,
                             struct intel_crtc_state *pipe_config,
                             struct drm_connector_state *conn_state)
@@ -1994,7 +2014,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct link_config_limits limits;
        int common_len;
-       bool ret;
+       int ret;
 
        common_len = intel_dp_common_len_rate_limit(intel_dp,
                                                    intel_dp->max_link_rate);
@@ -2051,10 +2071,12 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
                                                        &limits);
 
        /* enable compression if the mode doesn't fit available BW */
-       if (!ret) {
-               if (!intel_dp_dsc_compute_config(intel_dp, pipe_config,
-                                                conn_state, &limits))
-                       return false;
+       DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
+       if (ret || intel_dp->force_dsc_en) {
+               ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
+                                                 conn_state, &limits);
+               if (ret < 0)
+                       return ret;
        }
 
        if (pipe_config->dsc_params.compression_enable) {
@@ -2079,10 +2101,10 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
                              intel_dp_max_data_rate(pipe_config->port_clock,
                                                     pipe_config->lane_count));
        }
-       return true;
+       return 0;
 }
 
-bool
+int
 intel_dp_compute_config(struct intel_encoder *encoder,
                        struct intel_crtc_state *pipe_config,
                        struct drm_connector_state *conn_state)
@@ -2098,6 +2120,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
                to_intel_digital_connector_state(conn_state);
        bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
                                           DP_DPCD_QUIRK_CONSTANT_N);
+       int ret;
 
        if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
                pipe_config->has_pch_encoder = true;
@@ -2119,14 +2142,12 @@ intel_dp_compute_config(struct intel_encoder *encoder,
                                       adjusted_mode);
 
                if (INTEL_GEN(dev_priv) >= 9) {
-                       int ret;
-
                        ret = skl_update_scaler_crtc(pipe_config);
                        if (ret)
                                return ret;
                }
 
-               if (HAS_GMCH_DISPLAY(dev_priv))
+               if (HAS_GMCH(dev_priv))
                        intel_gmch_panel_fitting(intel_crtc, pipe_config,
                                                 conn_state->scaling_mode);
                else
@@ -2135,20 +2156,21 @@ intel_dp_compute_config(struct intel_encoder *encoder,
        }
 
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
-               return false;
+               return -EINVAL;
 
-       if (HAS_GMCH_DISPLAY(dev_priv) &&
+       if (HAS_GMCH(dev_priv) &&
            adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
-               return false;
+               return -EINVAL;
 
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
-               return false;
+               return -EINVAL;
 
        pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
                                  intel_dp_supports_fec(intel_dp, pipe_config);
 
-       if (!intel_dp_compute_link_config(encoder, pipe_config, conn_state))
-               return false;
+       ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
+       if (ret < 0)
+               return ret;
 
        if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
                /*
@@ -2196,11 +2218,11 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 
        intel_psr_compute_config(intel_dp, pipe_config);
 
-       return true;
+       return 0;
 }
 
 void intel_dp_set_link_params(struct intel_dp *intel_dp,
-                             int link_rate, uint8_t lane_count,
+                             int link_rate, u8 lane_count,
                              bool link_mst)
 {
        intel_dp->link_trained = false;
@@ -2462,15 +2484,15 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
  */
 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
 {
+       intel_wakeref_t wakeref;
        bool vdd;
 
        if (!intel_dp_is_edp(intel_dp))
                return;
 
-       pps_lock(intel_dp);
-       vdd = edp_panel_vdd_on(intel_dp);
-       pps_unlock(intel_dp);
-
+       vdd = false;
+       with_pps_lock(intel_dp, wakeref)
+               vdd = edp_panel_vdd_on(intel_dp);
        I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
             port_name(dp_to_dig_port(intel_dp)->base.port));
 }
@@ -2509,19 +2531,21 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
        if ((pp & PANEL_POWER_ON) == 0)
                intel_dp->panel_power_off_time = ktime_get_boottime();
 
-       intel_display_power_put(dev_priv,
-                               intel_aux_power_domain(intel_dig_port));
+       intel_display_power_put_unchecked(dev_priv,
+                                         intel_aux_power_domain(intel_dig_port));
 }
 
 static void edp_panel_vdd_work(struct work_struct *__work)
 {
-       struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
-                                                struct intel_dp, panel_vdd_work);
+       struct intel_dp *intel_dp =
+               container_of(to_delayed_work(__work),
+                            struct intel_dp, panel_vdd_work);
+       intel_wakeref_t wakeref;
 
-       pps_lock(intel_dp);
-       if (!intel_dp->want_panel_vdd)
-               edp_panel_vdd_off_sync(intel_dp);
-       pps_unlock(intel_dp);
+       with_pps_lock(intel_dp, wakeref) {
+               if (!intel_dp->want_panel_vdd)
+                       edp_panel_vdd_off_sync(intel_dp);
+       }
 }
 
 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
@@ -2585,7 +2609,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
 
        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp = ironlake_get_pp_control(intel_dp);
-       if (IS_GEN5(dev_priv)) {
+       if (IS_GEN(dev_priv, 5)) {
                /* ILK workaround: disable reset around power sequence */
                pp &= ~PANEL_POWER_RESET;
                I915_WRITE(pp_ctrl_reg, pp);
@@ -2593,7 +2617,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
        }
 
        pp |= PANEL_POWER_ON;
-       if (!IS_GEN5(dev_priv))
+       if (!IS_GEN(dev_priv, 5))
                pp |= PANEL_POWER_RESET;
 
        I915_WRITE(pp_ctrl_reg, pp);
@@ -2602,7 +2626,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
        wait_panel_on(intel_dp);
        intel_dp->last_power_on = jiffies;
 
-       if (IS_GEN5(dev_priv)) {
+       if (IS_GEN(dev_priv, 5)) {
                pp |= PANEL_POWER_RESET; /* restore panel reset bit */
                I915_WRITE(pp_ctrl_reg, pp);
                POSTING_READ(pp_ctrl_reg);
@@ -2611,12 +2635,13 @@ static void edp_panel_on(struct intel_dp *intel_dp)
 
 void intel_edp_panel_on(struct intel_dp *intel_dp)
 {
+       intel_wakeref_t wakeref;
+
        if (!intel_dp_is_edp(intel_dp))
                return;
 
-       pps_lock(intel_dp);
-       edp_panel_on(intel_dp);
-       pps_unlock(intel_dp);
+       with_pps_lock(intel_dp, wakeref)
+               edp_panel_on(intel_dp);
 }
 
 
@@ -2655,25 +2680,25 @@ static void edp_panel_off(struct intel_dp *intel_dp)
        intel_dp->panel_power_off_time = ktime_get_boottime();
 
        /* We got a reference when we enabled the VDD. */
-       intel_display_power_put(dev_priv, intel_aux_power_domain(dig_port));
+       intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port));
 }
 
 void intel_edp_panel_off(struct intel_dp *intel_dp)
 {
+       intel_wakeref_t wakeref;
+
        if (!intel_dp_is_edp(intel_dp))
                return;
 
-       pps_lock(intel_dp);
-       edp_panel_off(intel_dp);
-       pps_unlock(intel_dp);
+       with_pps_lock(intel_dp, wakeref)
+               edp_panel_off(intel_dp);
 }
 
 /* Enable backlight in the panel power control. */
 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       u32 pp;
-       i915_reg_t pp_ctrl_reg;
+       intel_wakeref_t wakeref;
 
        /*
         * If we enable the backlight right away following a panel power
@@ -2683,17 +2708,16 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
         */
        wait_backlight_on(intel_dp);
 
-       pps_lock(intel_dp);
+       with_pps_lock(intel_dp, wakeref) {
+               i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+               u32 pp;
 
-       pp = ironlake_get_pp_control(intel_dp);
-       pp |= EDP_BLC_ENABLE;
-
-       pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
-
-       I915_WRITE(pp_ctrl_reg, pp);
-       POSTING_READ(pp_ctrl_reg);
+               pp = ironlake_get_pp_control(intel_dp);
+               pp |= EDP_BLC_ENABLE;
 
-       pps_unlock(intel_dp);
+               I915_WRITE(pp_ctrl_reg, pp);
+               POSTING_READ(pp_ctrl_reg);
+       }
 }
 
 /* Enable backlight PWM and backlight PP control. */
@@ -2715,23 +2739,21 @@ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       u32 pp;
-       i915_reg_t pp_ctrl_reg;
+       intel_wakeref_t wakeref;
 
        if (!intel_dp_is_edp(intel_dp))
                return;
 
-       pps_lock(intel_dp);
+       with_pps_lock(intel_dp, wakeref) {
+               i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+               u32 pp;
 
-       pp = ironlake_get_pp_control(intel_dp);
-       pp &= ~EDP_BLC_ENABLE;
-
-       pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+               pp = ironlake_get_pp_control(intel_dp);
+               pp &= ~EDP_BLC_ENABLE;
 
-       I915_WRITE(pp_ctrl_reg, pp);
-       POSTING_READ(pp_ctrl_reg);
-
-       pps_unlock(intel_dp);
+               I915_WRITE(pp_ctrl_reg, pp);
+               POSTING_READ(pp_ctrl_reg);
+       }
 
        intel_dp->last_backlight_off = jiffies;
        edp_wait_backlight_off(intel_dp);
@@ -2759,12 +2781,12 @@ static void intel_edp_backlight_power(struct intel_connector *connector,
                                      bool enable)
 {
        struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
+       intel_wakeref_t wakeref;
        bool is_enabled;
 
-       pps_lock(intel_dp);
-       is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
-       pps_unlock(intel_dp);
-
+       is_enabled = false;
+       with_pps_lock(intel_dp, wakeref)
+               is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
        if (is_enabled == enable)
                return;
 
@@ -2831,7 +2853,7 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
         * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
         * 2. Program DP PLL enable
         */
-       if (IS_GEN5(dev_priv))
+       if (IS_GEN(dev_priv, 5))
                intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);
 
        intel_dp->DP |= DP_PLL_ENABLE;
@@ -2981,16 +3003,18 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+       intel_wakeref_t wakeref;
        bool ret;
 
-       if (!intel_display_power_get_if_enabled(dev_priv,
-                                               encoder->power_domain))
+       wakeref = intel_display_power_get_if_enabled(dev_priv,
+                                                    encoder->power_domain);
+       if (!wakeref)
                return false;
 
        ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
                                    encoder->port, pipe);
 
-       intel_display_power_put(dev_priv, encoder->power_domain);
+       intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
 
        return ret;
 }
@@ -3158,20 +3182,20 @@ static void chv_post_disable_dp(struct intel_encoder *encoder,
 
 static void
 _intel_dp_set_link_train(struct intel_dp *intel_dp,
-                        uint32_t *DP,
-                        uint8_t dp_train_pat)
+                        u32 *DP,
+                        u8 dp_train_pat)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum port port = intel_dig_port->base.port;
-       uint8_t train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
+       u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
 
        if (dp_train_pat & train_pat_mask)
                DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
                              dp_train_pat & train_pat_mask);
 
        if (HAS_DDI(dev_priv)) {
-               uint32_t temp = I915_READ(DP_TP_CTL(port));
+               u32 temp = I915_READ(DP_TP_CTL(port));
 
                if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
                        temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
@@ -3270,24 +3294,23 @@ static void intel_enable_dp(struct intel_encoder *encoder,
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
-       uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+       u32 dp_reg = I915_READ(intel_dp->output_reg);
        enum pipe pipe = crtc->pipe;
+       intel_wakeref_t wakeref;
 
        if (WARN_ON(dp_reg & DP_PORT_EN))
                return;
 
-       pps_lock(intel_dp);
+       with_pps_lock(intel_dp, wakeref) {
+               if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+                       vlv_init_panel_power_sequencer(encoder, pipe_config);
 
-       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               vlv_init_panel_power_sequencer(encoder, pipe_config);
+               intel_dp_enable_port(intel_dp, pipe_config);
 
-       intel_dp_enable_port(intel_dp, pipe_config);
-
-       edp_panel_vdd_on(intel_dp);
-       edp_panel_on(intel_dp);
-       edp_panel_vdd_off(intel_dp, true);
-
-       pps_unlock(intel_dp);
+               edp_panel_vdd_on(intel_dp);
+               edp_panel_on(intel_dp);
+               edp_panel_vdd_off(intel_dp, true);
+       }
 
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                unsigned int lane_mask = 0x0;
@@ -3490,14 +3513,14 @@ static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
  * link status information
  */
 bool
-intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
+intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE])
 {
        return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
                                DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
 }
 
 /* These are source-specific values. */
-uint8_t
+u8
 intel_dp_voltage_max(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -3516,8 +3539,8 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
                return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
 }
 
-uint8_t
-intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
+u8
+intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
@@ -3562,12 +3585,12 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
        }
 }
 
-static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
+static u32 vlv_signal_levels(struct intel_dp *intel_dp)
 {
        struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
        unsigned long demph_reg_value, preemph_reg_value,
                uniqtranscale_reg_value;
-       uint8_t train_set = intel_dp->train_set[0];
+       u8 train_set = intel_dp->train_set[0];
 
        switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
        case DP_TRAIN_PRE_EMPH_LEVEL_0:
@@ -3648,12 +3671,12 @@ static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
        return 0;
 }
 
-static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
+static u32 chv_signal_levels(struct intel_dp *intel_dp)
 {
        struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
        u32 deemph_reg_value, margin_reg_value;
        bool uniq_trans_scale = false;
-       uint8_t train_set = intel_dp->train_set[0];
+       u8 train_set = intel_dp->train_set[0];
 
        switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
        case DP_TRAIN_PRE_EMPH_LEVEL_0:
@@ -3731,10 +3754,10 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
        return 0;
 }
 
-static uint32_t
-g4x_signal_levels(uint8_t train_set)
+static u32
+g4x_signal_levels(u8 train_set)
 {
-       uint32_t        signal_levels = 0;
+       u32 signal_levels = 0;
 
        switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
        case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
@@ -3770,8 +3793,8 @@ g4x_signal_levels(uint8_t train_set)
 }
 
 /* SNB CPU eDP voltage swing and pre-emphasis control */
-static uint32_t
-snb_cpu_edp_signal_levels(uint8_t train_set)
+static u32
+snb_cpu_edp_signal_levels(u8 train_set)
 {
        int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
                                         DP_TRAIN_PRE_EMPHASIS_MASK);
@@ -3798,8 +3821,8 @@ snb_cpu_edp_signal_levels(uint8_t train_set)
 }
 
 /* IVB CPU eDP voltage swing and pre-emphasis control */
-static uint32_t
-ivb_cpu_edp_signal_levels(uint8_t train_set)
+static u32
+ivb_cpu_edp_signal_levels(u8 train_set)
 {
        int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
                                         DP_TRAIN_PRE_EMPHASIS_MASK);
@@ -3834,8 +3857,8 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum port port = intel_dig_port->base.port;
-       uint32_t signal_levels, mask = 0;
-       uint8_t train_set = intel_dp->train_set[0];
+       u32 signal_levels, mask = 0;
+       u8 train_set = intel_dp->train_set[0];
 
        if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
                signal_levels = bxt_signal_levels(intel_dp);
@@ -3849,7 +3872,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
        } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
                signal_levels = ivb_cpu_edp_signal_levels(train_set);
                mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
-       } else if (IS_GEN6(dev_priv) && port == PORT_A) {
+       } else if (IS_GEN(dev_priv, 6) && port == PORT_A) {
                signal_levels = snb_cpu_edp_signal_levels(train_set);
                mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
        } else {
@@ -3874,7 +3897,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
 
 void
 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
-                                      uint8_t dp_train_pat)
+                                      u8 dp_train_pat)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv =
@@ -3891,7 +3914,7 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum port port = intel_dig_port->base.port;
-       uint32_t val;
+       u32 val;
 
        if (!HAS_DDI(dev_priv))
                return;
@@ -3926,7 +3949,7 @@ intel_dp_link_down(struct intel_encoder *encoder,
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        enum port port = encoder->port;
-       uint32_t DP = intel_dp->DP;
+       u32 DP = intel_dp->DP;
 
        if (WARN_ON(HAS_DDI(dev_priv)))
                return;
@@ -3985,12 +4008,49 @@ intel_dp_link_down(struct intel_encoder *encoder,
        intel_dp->DP = DP;
 
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-               pps_lock(intel_dp);
-               intel_dp->active_pipe = INVALID_PIPE;
-               pps_unlock(intel_dp);
+               intel_wakeref_t wakeref;
+
+               with_pps_lock(intel_dp, wakeref)
+                       intel_dp->active_pipe = INVALID_PIPE;
        }
 }
 
+static void
+intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
+{
+       u8 dpcd_ext[6];
+
+       /*
+        * Prior to DP 1.3 the bit represented by
+        * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
+        * If it is set, DP_DPCD_REV at 0000h could report a value lower
+        * than the true capability of the panel. The only way to check
+        * is to compare 0000h against 2200h.
+        */
+       if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+             DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
+               return;
+
+       if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV,
+                            &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) {
+               DRM_ERROR("Failed to read DPCD extended receiver capabilities\n");
+               return;
+       }
+
+       if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
+               DRM_DEBUG_KMS("Extended DPCD rev less than base DPCD rev\n");
+               return;
+       }
+
+       if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)))
+               return;
+
+       DRM_DEBUG_KMS("Base DPCD: %*ph\n",
+                     (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);
+
+       memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext));
+}
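
 /*
  * For reference: DP_DP13_DPCD_REV is the extended receiver capability
  * field at DPCD address 2200h, which mirrors the base capability field
  * at 0000h.  A sink reporting DPCD 1.2 at 0000h but 1.4 at 2200h will,
  * after the helper above runs, be treated as a DPCD 1.4 sink.
  */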
+
 bool
 intel_dp_read_dpcd(struct intel_dp *intel_dp)
 {
@@ -3998,6 +4058,8 @@ intel_dp_read_dpcd(struct intel_dp *intel_dp)
                             sizeof(intel_dp->dpcd)) < 0)
                return false; /* aux transfer failed */
 
+       intel_dp_extended_receiver_capabilities(intel_dp);
+
        DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
 
        return intel_dp->dpcd[DP_DPCD_REV] != 0;
@@ -4228,7 +4290,7 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
                DP_DPRX_ESI_LEN;
 }
 
-u16 intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count,
+u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
                                int mode_clock, int mode_hdisplay)
 {
        u16 bits_per_pixel, max_bpp_small_joiner_ram;
@@ -4295,7 +4357,7 @@ u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
                return 0;
        }
        /* Also take into account max slice width */
-       min_slice_count = min_t(uint8_t, min_slice_count,
+       min_slice_count = min_t(u8, min_slice_count,
                                DIV_ROUND_UP(mode_hdisplay,
                                             max_slice_width));
 
@@ -4313,11 +4375,11 @@ u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
        return 0;
 }
 
-static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
+static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
 {
        int status = 0;
        int test_link_rate;
-       uint8_t test_lane_count, test_link_bw;
+       u8 test_lane_count, test_link_bw;
        /* DP CTS 1.2, section 4.3.1.11 */
@@ -4350,10 +4412,10 @@ static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
        return DP_TEST_ACK;
 }
 
-static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
+static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
 {
-       uint8_t test_pattern;
-       uint8_t test_misc;
+       u8 test_pattern;
+       u8 test_misc;
        __be16 h_width, v_height;
        int status = 0;
 
@@ -4411,9 +4473,9 @@ static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
        return DP_TEST_ACK;
 }
 
-static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
+static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
 {
-       uint8_t test_result = DP_TEST_ACK;
+       u8 test_result = DP_TEST_ACK;
        struct intel_connector *intel_connector = intel_dp->attached_connector;
        struct drm_connector *connector = &intel_connector->base;
 
@@ -4455,16 +4517,16 @@ static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
        return test_result;
 }
 
-static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
+static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
 {
-       uint8_t test_result = DP_TEST_NAK;
+       u8 test_result = DP_TEST_NAK;
        return test_result;
 }
 
 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
 {
-       uint8_t response = DP_TEST_NAK;
-       uint8_t request = 0;
+       u8 response = DP_TEST_NAK;
+       u8 request = 0;
        int status;
 
        status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
@@ -4552,12 +4614,10 @@ go_again:
 
                        return ret;
                } else {
-                       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
                        DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
                        intel_dp->is_mst = false;
-                       drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
-                       /* send a hotplug event */
-                       drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
+                       drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
+                                                       intel_dp->is_mst);
                }
        }
        return -EINVAL;
@@ -4790,8 +4850,8 @@ static enum drm_connector_status
 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
 {
        struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
-       uint8_t *dpcd = intel_dp->dpcd;
-       uint8_t type;
+       u8 *dpcd = intel_dp->dpcd;
+       u8 type;
 
        if (lspcon->active)
                lspcon_resume(lspcon);
@@ -5028,28 +5088,38 @@ static bool icl_combo_port_connected(struct drm_i915_private *dev_priv,
        return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port);
 }
 
+static const char *tc_type_name(enum tc_port_type type)
+{
+       static const char * const names[] = {
+               [TC_PORT_UNKNOWN] = "unknown",
+               [TC_PORT_LEGACY] = "legacy",
+               [TC_PORT_TYPEC] = "typec",
+               [TC_PORT_TBT] = "tbt",
+       };
+
+       if (WARN_ON(type >= ARRAY_SIZE(names)))
+               type = TC_PORT_UNKNOWN;
+
+       return names[type];
+}
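
 /*
  * tc_type_name() is a bounds-checked string table: for example,
  * tc_type_name(TC_PORT_TBT) yields "tbt", while an out-of-range value
  * trips the WARN_ON and falls back to "unknown".
  */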
+
 static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
                                    struct intel_digital_port *intel_dig_port,
                                    bool is_legacy, bool is_typec, bool is_tbt)
 {
        enum port port = intel_dig_port->base.port;
        enum tc_port_type old_type = intel_dig_port->tc_type;
-       const char *type_str;
 
        WARN_ON(is_legacy + is_typec + is_tbt != 1);
 
-       if (is_legacy) {
+       if (is_legacy)
                intel_dig_port->tc_type = TC_PORT_LEGACY;
-               type_str = "legacy";
-       } else if (is_typec) {
+       else if (is_typec)
                intel_dig_port->tc_type = TC_PORT_TYPEC;
-               type_str = "typec";
-       } else if (is_tbt) {
+       else if (is_tbt)
                intel_dig_port->tc_type = TC_PORT_TBT;
-               type_str = "tbt";
-       } else {
+       else
                return;
-       }
 
        /* Types are not supposed to be changed at runtime. */
        WARN_ON(old_type != TC_PORT_UNKNOWN &&
@@ -5057,12 +5127,9 @@ static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
 
        if (old_type != intel_dig_port->tc_type)
                DRM_DEBUG_KMS("Port %c has TC type %s\n", port_name(port),
-                             type_str);
+                             tc_type_name(intel_dig_port->tc_type));
 }
 
-static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
-                                 struct intel_digital_port *dig_port);
-
 /*
  * This function implements the first part of the Connect Flow described by our
  * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
@@ -5097,6 +5164,7 @@ static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
        val = I915_READ(PORT_TX_DFLEXDPPMS);
        if (!(val & DP_PHY_MODE_STATUS_COMPLETED(tc_port))) {
                DRM_DEBUG_KMS("DP PHY for TC port %d not ready\n", tc_port);
+               WARN_ON(dig_port->tc_legacy_port);
                return false;
        }
 
@@ -5128,8 +5196,8 @@ static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
  * See the comment at the connect function. This implements the Disconnect
  * Flow.
  */
-static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
-                                 struct intel_digital_port *dig_port)
+void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
+                          struct intel_digital_port *dig_port)
 {
        enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
 
@@ -5149,6 +5217,10 @@ static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
                I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
        }
 
+       DRM_DEBUG_KMS("Port %c TC type %s disconnected\n",
+                     port_name(dig_port->base.port),
+                     tc_type_name(dig_port->tc_type));
+
        dig_port->tc_type = TC_PORT_UNKNOWN;
 }
 
@@ -5170,7 +5242,14 @@ static bool icl_tc_port_connected(struct drm_i915_private *dev_priv,
        bool is_legacy, is_typec, is_tbt;
        u32 dpsp;
 
-       is_legacy = I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port);
+       /*
+        * WARN if we got a legacy port HPD, but VBT didn't mark the port as
+        * legacy. Treat the port as legacy from now on.
+        */
+       if (WARN_ON(!intel_dig_port->tc_legacy_port &&
+                   I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)))
+               intel_dig_port->tc_legacy_port = true;
+       is_legacy = intel_dig_port->tc_legacy_port;
 
        /*
         * The spec says we shouldn't be using the ISR bits for detecting
@@ -5182,6 +5261,7 @@ static bool icl_tc_port_connected(struct drm_i915_private *dev_priv,
 
        if (!is_legacy && !is_typec && !is_tbt) {
                icl_tc_phy_disconnect(dev_priv, intel_dig_port);
+
                return false;
        }
 
@@ -5224,7 +5304,7 @@ bool intel_digital_port_connected(struct intel_encoder *encoder)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-       if (HAS_GMCH_DISPLAY(dev_priv)) {
+       if (HAS_GMCH(dev_priv)) {
                if (IS_GM45(dev_priv))
                        return gm45_digital_port_connected(encoder);
                else
@@ -5233,17 +5313,17 @@ bool intel_digital_port_connected(struct intel_encoder *encoder)
 
        if (INTEL_GEN(dev_priv) >= 11)
                return icl_digital_port_connected(encoder);
-       else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv))
+       else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv))
                return spt_digital_port_connected(encoder);
        else if (IS_GEN9_LP(dev_priv))
                return bxt_digital_port_connected(encoder);
-       else if (IS_GEN8(dev_priv))
+       else if (IS_GEN(dev_priv, 8))
                return bdw_digital_port_connected(encoder);
-       else if (IS_GEN7(dev_priv))
+       else if (IS_GEN(dev_priv, 7))
                return ivb_digital_port_connected(encoder);
-       else if (IS_GEN6(dev_priv))
+       else if (IS_GEN(dev_priv, 6))
                return snb_digital_port_connected(encoder);
-       else if (IS_GEN5(dev_priv))
+       else if (IS_GEN(dev_priv, 5))
                return ilk_digital_port_connected(encoder);
 
        MISSING_CASE(INTEL_GEN(dev_priv));
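
        /*
         * The IS_GENn() users above now go through IS_GEN(i915, n); a
         * sketch of that predicate, assuming the gen_mask scheme this
         * series converts to:
         *
         *   #define IS_GEN(dev_priv, n) \
         *       (!!((dev_priv)->info.gen_mask & BIT(n)))
         */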
@@ -5305,12 +5385,13 @@ intel_dp_detect(struct drm_connector *connector,
        enum drm_connector_status status;
        enum intel_display_power_domain aux_domain =
                intel_aux_power_domain(dig_port);
+       intel_wakeref_t wakeref;
 
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                      connector->base.id, connector->name);
        WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
 
-       intel_display_power_get(dev_priv, aux_domain);
+       wakeref = intel_display_power_get(dev_priv, aux_domain);
 
        /* Can't disconnect eDP */
        if (intel_dp_is_edp(intel_dp))
@@ -5376,7 +5457,7 @@ intel_dp_detect(struct drm_connector *connector,
 
                ret = intel_dp_retrain_link(encoder, ctx);
                if (ret) {
-                       intel_display_power_put(dev_priv, aux_domain);
+                       intel_display_power_put(dev_priv, aux_domain, wakeref);
                        return ret;
                }
        }
@@ -5400,7 +5481,7 @@ out:
        if (status != connector_status_connected && !intel_dp->is_mst)
                intel_dp_unset_edid(intel_dp);
 
-       intel_display_power_put(dev_priv, aux_domain);
+       intel_display_power_put(dev_priv, aux_domain, wakeref);
        return status;
 }
 
@@ -5413,6 +5494,7 @@ intel_dp_force(struct drm_connector *connector)
        struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
        enum intel_display_power_domain aux_domain =
                intel_aux_power_domain(dig_port);
+       intel_wakeref_t wakeref;
 
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                      connector->base.id, connector->name);
@@ -5421,11 +5503,11 @@ intel_dp_force(struct drm_connector *connector)
        if (connector->status != connector_status_connected)
                return;
 
-       intel_display_power_get(dev_priv, aux_domain);
+       wakeref = intel_display_power_get(dev_priv, aux_domain);
 
        intel_dp_set_edid(intel_dp);
 
-       intel_display_power_put(dev_priv, aux_domain);
+       intel_display_power_put(dev_priv, aux_domain, wakeref);
 }
 
 static int intel_dp_get_modes(struct drm_connector *connector)
@@ -5490,21 +5572,22 @@ intel_dp_connector_unregister(struct drm_connector *connector)
        intel_connector_unregister(connector);
 }
 
-void intel_dp_encoder_destroy(struct drm_encoder *encoder)
+void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
 {
        struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
        struct intel_dp *intel_dp = &intel_dig_port->dp;
 
        intel_dp_mst_encoder_cleanup(intel_dig_port);
        if (intel_dp_is_edp(intel_dp)) {
+               intel_wakeref_t wakeref;
+
                cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
                /*
                 * vdd might still be enabled due to the delayed vdd off.
                 * Make sure vdd is actually turned off here.
                 */
-               pps_lock(intel_dp);
-               edp_panel_vdd_off_sync(intel_dp);
-               pps_unlock(intel_dp);
+               with_pps_lock(intel_dp, wakeref)
+                       edp_panel_vdd_off_sync(intel_dp);
 
                if (intel_dp->edp_notifier.notifier_call) {
                        unregister_reboot_notifier(&intel_dp->edp_notifier);
@@ -5513,14 +5596,20 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
        }
 
        intel_dp_aux_fini(intel_dp);
+}
+
+static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
+{
+       intel_dp_encoder_flush_work(encoder);
 
        drm_encoder_cleanup(encoder);
-       kfree(intel_dig_port);
+       kfree(enc_to_dig_port(encoder));
 }
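
 /*
  * intel_dp_encoder_flush_work() carries the teardown that must not be
  * deferred (MST cleanup, cancelling the delayed VDD work and forcing
  * VDD off under the PPS lock, releasing the AUX channel) while leaving
  * the drm_encoder alive; intel_dp_encoder_destroy() is then just this
  * flush plus drm_encoder_cleanup() and freeing the digital port.
  */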
 
 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+       intel_wakeref_t wakeref;
 
        if (!intel_dp_is_edp(intel_dp))
                return;
@@ -5530,9 +5619,8 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
         * Make sure vdd is actually turned off here.
         */
        cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
-       pps_lock(intel_dp);
-       edp_panel_vdd_off_sync(intel_dp);
-       pps_unlock(intel_dp);
+       with_pps_lock(intel_dp, wakeref)
+               edp_panel_vdd_off_sync(intel_dp);
 }
 
 static
@@ -5545,7 +5633,7 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
                .address = DP_AUX_HDCP_AKSV,
                .size = DRM_HDCP_KSV_LEN,
        };
-       uint8_t txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
+       u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
        ssize_t dpcd_ret;
        int ret;
 
@@ -5578,7 +5666,12 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
        }
 
        reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
-       return reply == DP_AUX_NATIVE_REPLY_ACK ? 0 : -EIO;
+       if (reply != DP_AUX_NATIVE_REPLY_ACK) {
+               DRM_DEBUG_KMS("Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
+                             reply);
+               return -EIO;
+       }
+       return 0;
 }
 
 static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
@@ -5808,6 +5901,7 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
        struct drm_i915_private *dev_priv = to_i915(encoder->dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
        struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
+       intel_wakeref_t wakeref;
 
        if (!HAS_DDI(dev_priv))
                intel_dp->DP = I915_READ(intel_dp->output_reg);
@@ -5817,18 +5911,19 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
 
        intel_dp->reset_link_params = true;
 
-       pps_lock(intel_dp);
-
-       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               intel_dp->active_pipe = vlv_active_pipe(intel_dp);
+       with_pps_lock(intel_dp, wakeref) {
+               if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+                       intel_dp->active_pipe = vlv_active_pipe(intel_dp);
 
-       if (intel_dp_is_edp(intel_dp)) {
-               /* Reinit the power sequencer, in case BIOS did something with it. */
-               intel_dp_pps_init(intel_dp);
-               intel_edp_panel_vdd_sanitize(intel_dp);
+               if (intel_dp_is_edp(intel_dp)) {
+                       /*
+                        * Reinit the power sequencer, in case BIOS did
+                        * something nasty with it.
+                        */
+                       intel_dp_pps_init(intel_dp);
+                       intel_edp_panel_vdd_sanitize(intel_dp);
+               }
        }
-
-       pps_unlock(intel_dp);
 }
 
 static const struct drm_connector_funcs intel_dp_connector_funcs = {
@@ -5861,6 +5956,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        enum irqreturn ret = IRQ_NONE;
+       intel_wakeref_t wakeref;
 
        if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
                /*
@@ -5883,8 +5979,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
                return IRQ_NONE;
        }
 
-       intel_display_power_get(dev_priv,
-                               intel_aux_power_domain(intel_dig_port));
+       wakeref = intel_display_power_get(dev_priv,
+                                         intel_aux_power_domain(intel_dig_port));
 
        if (intel_dp->is_mst) {
                if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
@@ -5914,7 +6010,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 
 put_power:
        intel_display_power_put(dev_priv,
-                               intel_aux_power_domain(intel_dig_port));
+                               intel_aux_power_domain(intel_dig_port),
+                               wakeref);
 
        return ret;
 }
@@ -5945,7 +6042,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
                intel_attach_force_audio_property(connector);
 
        intel_attach_broadcast_rgb_property(connector);
-       if (HAS_GMCH_DISPLAY(dev_priv))
+       if (HAS_GMCH(dev_priv))
                drm_connector_attach_max_bpc_property(connector, 6, 10);
        else if (INTEL_GEN(dev_priv) >= 5)
                drm_connector_attach_max_bpc_property(connector, 6, 12);
@@ -5954,7 +6051,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
                u32 allowed_scalers;
 
                allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
-               if (!HAS_GMCH_DISPLAY(dev_priv))
+               if (!HAS_GMCH(dev_priv))
                        allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
 
                drm_connector_attach_scaling_mode_property(connector, allowed_scalers);
@@ -6361,8 +6458,8 @@ void intel_edp_drrs_enable(struct intel_dp *intel_dp,
        }
 
        mutex_lock(&dev_priv->drrs.mutex);
-       if (WARN_ON(dev_priv->drrs.dp)) {
-               DRM_ERROR("DRRS already enabled\n");
+       if (dev_priv->drrs.dp) {
+               DRM_DEBUG_KMS("DRRS already enabled\n");
                goto unlock;
        }
 
@@ -6622,8 +6719,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
        struct drm_display_mode *downclock_mode = NULL;
        bool has_dpcd;
        struct drm_display_mode *scan;
-       struct edid *edid;
        enum pipe pipe = INVALID_PIPE;
+       intel_wakeref_t wakeref;
+       struct edid *edid;
 
        if (!intel_dp_is_edp(intel_dp))
                return true;
@@ -6643,13 +6741,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
                return false;
        }
 
-       pps_lock(intel_dp);
-
-       intel_dp_init_panel_power_timestamps(intel_dp);
-       intel_dp_pps_init(intel_dp);
-       intel_edp_panel_vdd_sanitize(intel_dp);
-
-       pps_unlock(intel_dp);
+       with_pps_lock(intel_dp, wakeref) {
+               intel_dp_init_panel_power_timestamps(intel_dp);
+               intel_dp_pps_init(intel_dp);
+               intel_edp_panel_vdd_sanitize(intel_dp);
+       }
 
        /* Cache DPCD and EDID for edp. */
        has_dpcd = intel_edp_init_dpcd(intel_dp);
@@ -6734,9 +6830,8 @@ out_vdd_off:
         * vdd might still be enabled due to the delayed vdd off.
         * Make sure vdd is actually turned off here.
         */
-       pps_lock(intel_dp);
-       edp_panel_vdd_off_sync(intel_dp);
-       pps_unlock(intel_dp);
+       with_pps_lock(intel_dp, wakeref)
+               edp_panel_vdd_off_sync(intel_dp);
 
        return false;
 }
@@ -6828,7 +6923,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
        drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
        drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
 
-       if (!HAS_GMCH_DISPLAY(dev_priv))
+       if (!HAS_GMCH(dev_priv))
                connector->interlace_allowed = true;
        connector->doublescan_allowed = 0;
 
@@ -6910,6 +7005,7 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
        intel_encoder->compute_config = intel_dp_compute_config;
        intel_encoder->get_hw_state = intel_dp_get_hw_state;
        intel_encoder->get_config = intel_dp_get_config;
+       intel_encoder->update_pipe = intel_panel_update_backlight;
        intel_encoder->suspend = intel_dp_encoder_suspend;
        if (IS_CHERRYVIEW(dev_priv)) {
                intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
@@ -7004,7 +7100,10 @@ void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
                        continue;
 
                ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr);
-               if (ret)
-                       intel_dp_check_mst_status(intel_dp);
+               if (ret) {
+                       intel_dp->is_mst = false;
+                       drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
+                                                       false);
+               }
        }
 }
index 30be0e39bd5fda1afd705f44f840260d6177abbf..b59c87daa4f7a3e63251f8d681c55512c77a8ef6 100644 (file)
@@ -24,7 +24,7 @@
 #include "intel_drv.h"
 
 static void
-intel_dp_dump_link_status(const uint8_t link_status[DP_LINK_STATUS_SIZE])
+intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE])
 {
        DRM_DEBUG_KMS("ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n",
@@ -34,17 +34,17 @@ intel_dp_dump_link_status(const uint8_t link_status[DP_LINK_STATUS_SIZE])
 
 static void
 intel_get_adjust_train(struct intel_dp *intel_dp,
-                      const uint8_t link_status[DP_LINK_STATUS_SIZE])
+                      const u8 link_status[DP_LINK_STATUS_SIZE])
 {
-       uint8_t v = 0;
-       uint8_t p = 0;
+       u8 v = 0;
+       u8 p = 0;
        int lane;
-       uint8_t voltage_max;
-       uint8_t preemph_max;
+       u8 voltage_max;
+       u8 preemph_max;
 
        for (lane = 0; lane < intel_dp->lane_count; lane++) {
-               uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
-               uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
+               u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
+               u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
 
                if (this_v > v)
                        v = this_v;
@@ -66,9 +66,9 @@ intel_get_adjust_train(struct intel_dp *intel_dp,
 
 static bool
 intel_dp_set_link_train(struct intel_dp *intel_dp,
-                       uint8_t dp_train_pat)
+                       u8 dp_train_pat)
 {
-       uint8_t buf[sizeof(intel_dp->train_set) + 1];
+       u8 buf[sizeof(intel_dp->train_set) + 1];
        int ret, len;
 
        intel_dp_program_link_training_pattern(intel_dp, dp_train_pat);
@@ -92,7 +92,7 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
 
 static bool
 intel_dp_reset_link_train(struct intel_dp *intel_dp,
-                       uint8_t dp_train_pat)
+                       u8 dp_train_pat)
 {
        memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
        intel_dp_set_signal_levels(intel_dp);
@@ -128,11 +128,11 @@ static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp)
 static bool
 intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
 {
-       uint8_t voltage;
+       u8 voltage;
        int voltage_tries, cr_tries, max_cr_tries;
        bool max_vswing_reached = false;
-       uint8_t link_config[2];
-       uint8_t link_bw, rate_select;
+       u8 link_config[2];
+       u8 link_bw, rate_select;
 
        if (intel_dp->prepare_link_retrain)
                intel_dp->prepare_link_retrain(intel_dp);
@@ -186,7 +186,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
 
        voltage_tries = 1;
        for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) {
-               uint8_t link_status[DP_LINK_STATUS_SIZE];
+               u8 link_status[DP_LINK_STATUS_SIZE];
 
                drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
 
@@ -282,7 +282,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
 {
        int tries;
        u32 training_pattern;
-       uint8_t link_status[DP_LINK_STATUS_SIZE];
+       u8 link_status[DP_LINK_STATUS_SIZE];
        bool channel_eq = false;
 
        training_pattern = intel_dp_training_pattern(intel_dp);
index f05427b74e348465a6bb162ede4e52d5ef59b2a5..cdb83d294cdd397efd3a1c5f95f2853f2733282b 100644 (file)
  *
  */
 
-#include <drm/drmP.h>
 #include "i915_drv.h"
 #include "intel_drv.h"
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
+#include <drm/drm_probe_helper.h>
 
-static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
-                                       struct intel_crtc_state *pipe_config,
-                                       struct drm_connector_state *conn_state)
+static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
+                                      struct intel_crtc_state *pipe_config,
+                                      struct drm_connector_state *conn_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
@@ -41,15 +40,19 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
        struct drm_connector *connector = conn_state->connector;
        void *port = to_intel_connector(connector)->port;
        struct drm_atomic_state *state = pipe_config->base.state;
+       struct drm_crtc *crtc = pipe_config->base.crtc;
+       struct drm_crtc_state *old_crtc_state =
+               drm_atomic_get_old_crtc_state(state, crtc);
        int bpp;
-       int lane_count, slots = 0;
+       int lane_count, slots =
+               to_intel_crtc_state(old_crtc_state)->dp_m_n.tu;
        const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
        int mst_pbn;
        bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
                                           DP_DPCD_QUIRK_CONSTANT_N);
 
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
-               return false;
+               return -EINVAL;
 
        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        pipe_config->has_pch_encoder = false;
@@ -86,7 +89,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
                if (slots < 0) {
                        DRM_DEBUG_KMS("failed finding vcpi slots:%d\n",
                                      slots);
-                       return false;
+                       return slots;
                }
        }
 
@@ -104,38 +107,42 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
 
        intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
 
-       return true;
+       return 0;
 }
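
 /*
  * The compute_config hook now follows the usual 0-or-negative-errno
  * convention: -EINVAL rejects an unusable mode, and a negative value
  * from the VCPI slot allocation (for instance -EDEADLK during atomic
  * backoff) is propagated instead of being flattened into a bool.
  */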
 
-static int intel_dp_mst_atomic_check(struct drm_connector *connector,
-               struct drm_connector_state *new_conn_state)
+static int
+intel_dp_mst_atomic_check(struct drm_connector *connector,
+                         struct drm_connector_state *new_conn_state)
 {
        struct drm_atomic_state *state = new_conn_state->state;
-       struct drm_connector_state *old_conn_state;
-       struct drm_crtc *old_crtc;
+       struct drm_connector_state *old_conn_state =
+               drm_atomic_get_old_connector_state(state, connector);
+       struct intel_connector *intel_connector =
+               to_intel_connector(connector);
+       struct drm_crtc *new_crtc = new_conn_state->crtc;
        struct drm_crtc_state *crtc_state;
-       int slots, ret = 0;
-
-       old_conn_state = drm_atomic_get_old_connector_state(state, connector);
-       old_crtc = old_conn_state->crtc;
-       if (!old_crtc)
-               return ret;
+       struct drm_dp_mst_topology_mgr *mgr;
+       int ret = 0;
 
-       crtc_state = drm_atomic_get_new_crtc_state(state, old_crtc);
-       slots = to_intel_crtc_state(crtc_state)->dp_m_n.tu;
-       if (drm_atomic_crtc_needs_modeset(crtc_state) && slots > 0) {
-               struct drm_dp_mst_topology_mgr *mgr;
-               struct drm_encoder *old_encoder;
+       if (!old_conn_state->crtc)
+               return 0;
 
-               old_encoder = old_conn_state->best_encoder;
-               mgr = &enc_to_mst(old_encoder)->primary->dp.mst_mgr;
+       /*
+        * We only want to free VCPI if this state disables the CRTC on
+        * this connector.
+        */
+       if (new_crtc) {
+               crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
 
-               ret = drm_dp_atomic_release_vcpi_slots(state, mgr, slots);
-               if (ret)
-                       DRM_DEBUG_KMS("failed releasing %d vcpi slots:%d\n", slots, ret);
-               else
-                       to_intel_crtc_state(crtc_state)->dp_m_n.tu = 0;
+               if (!crtc_state ||
+                   !drm_atomic_crtc_needs_modeset(crtc_state) ||
+                   crtc_state->enable)
+                       return 0;
        }
+
+       mgr = &enc_to_mst(old_conn_state->best_encoder)->primary->dp.mst_mgr;
+       ret = drm_dp_atomic_release_vcpi_slots(state, mgr,
+                                              intel_connector->port);
+
        return ret;
 }
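
 /*
  * drm_dp_atomic_release_vcpi_slots() is keyed by the drm_dp_mst_port
  * here rather than by a raw slot count, matching the reference-counted
  * port accounting set up via drm_dp_mst_get_port_malloc() when the
  * connector is created (see intel_dp_add_mst_connector() below).
  */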
 
@@ -240,7 +247,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
        struct intel_connector *connector =
                to_intel_connector(conn_state->connector);
        int ret;
-       uint32_t temp;
+       u32 temp;
 
        /*
         * MST encoders are bound to a crtc, not to a connector; force
         * the mapping here for get_hw_state.
         */
        intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
        intel_connector->mst_port = intel_dp;
        intel_connector->port = port;
+       drm_dp_mst_get_port_malloc(port);
 
        connector = &intel_connector->base;
        ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
index 3c7f10d1765824ba32d0899b6e50e84d488518e8..95cb8b154f87938946b22d1f867ac43e38fe9c37 100644 (file)
@@ -413,7 +413,7 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
        }
 
        if (phy_info->rcomp_phy != -1) {
-               uint32_t grc_code;
+               u32 grc_code;
 
                bxt_phy_wait_grc_done(dev_priv, phy_info->rcomp_phy);
 
@@ -445,7 +445,7 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
 void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
 {
        const struct bxt_ddi_phy_info *phy_info;
-       uint32_t val;
+       u32 val;
 
        phy_info = bxt_get_phy_info(dev_priv, phy);
 
@@ -515,7 +515,7 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
                              enum dpio_phy phy)
 {
        const struct bxt_ddi_phy_info *phy_info;
-       uint32_t mask;
+       u32 mask;
        bool ok;
 
        phy_info = bxt_get_phy_info(dev_priv, phy);
@@ -567,8 +567,8 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
 #undef _CHK
 }
 
-uint8_t
-bxt_ddi_phy_calc_lane_lat_optim_mask(uint8_t lane_count)
+u8
+bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count)
 {
        switch (lane_count) {
        case 1:
@@ -585,7 +585,7 @@ bxt_ddi_phy_calc_lane_lat_optim_mask(uint8_t lane_count)
 }
 
 void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
-                                    uint8_t lane_lat_optim_mask)
+                                    u8 lane_lat_optim_mask)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum port port = encoder->port;
@@ -610,7 +610,7 @@ void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
        }
 }
 
-uint8_t
+u8
 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -618,7 +618,7 @@ bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
        enum dpio_phy phy;
        enum dpio_channel ch;
        int lane;
-       uint8_t mask;
+       u8 mask;
 
        bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
 
@@ -739,7 +739,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
        enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        enum pipe pipe = crtc->pipe;
-       uint32_t val;
+       u32 val;
 
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
        if (reset)
index d513ca875c67693cf508c389367febdd3ae320d0..0a42d11c4c3377e36886442a1748b72e167ddb85 100644 (file)
@@ -247,7 +247,7 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
                       enum intel_dpll_id range_max)
 {
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       struct intel_shared_dpll *pll;
+       struct intel_shared_dpll *pll, *unused_pll = NULL;
        struct intel_shared_dpll_state *shared_dpll;
        enum intel_dpll_id i;
 
@@ -257,8 +257,11 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
                pll = &dev_priv->shared_dplls[i];
 
                /* Only want to check enabled timings first */
-               if (shared_dpll[i].crtc_mask == 0)
+               if (shared_dpll[i].crtc_mask == 0) {
+                       if (!unused_pll)
+                               unused_pll = pll;
                        continue;
+               }
 
                if (memcmp(&crtc_state->dpll_hw_state,
                           &shared_dpll[i].hw_state,
@@ -273,14 +276,11 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
        }
 
        /* Ok no matching timings, maybe there's a free one? */
-       for (i = range_min; i <= range_max; i++) {
-               pll = &dev_priv->shared_dplls[i];
-               if (shared_dpll[i].crtc_mask == 0) {
-                       DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
-                                     crtc->base.base.id, crtc->base.name,
-                                     pll->info->name);
-                       return pll;
-               }
+       if (unused_pll) {
+               DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
+                             crtc->base.base.id, crtc->base.name,
+                             unused_pll->info->name);
+               return unused_pll;
        }
 
        return NULL;
@@ -345,9 +345,12 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
                                      struct intel_dpll_hw_state *hw_state)
 {
        const enum intel_dpll_id id = pll->info->id;
-       uint32_t val;
+       intel_wakeref_t wakeref;
+       u32 val;
 
-       if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+       wakeref = intel_display_power_get_if_enabled(dev_priv,
+                                                    POWER_DOMAIN_PLLS);
+       if (!wakeref)
                return false;
 
        val = I915_READ(PCH_DPLL(id));
@@ -355,7 +358,7 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
        hw_state->fp0 = I915_READ(PCH_FP0(id));
        hw_state->fp1 = I915_READ(PCH_FP1(id));
 
-       intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+       intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
 
        return val & DPLL_VCO_ENABLE;
 }
@@ -487,7 +490,7 @@ static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
                                  struct intel_shared_dpll *pll)
 {
        const enum intel_dpll_id id = pll->info->id;
-       uint32_t val;
+       u32 val;
 
        val = I915_READ(WRPLL_CTL(id));
        I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
@@ -497,7 +500,7 @@ static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
 static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
                                 struct intel_shared_dpll *pll)
 {
-       uint32_t val;
+       u32 val;
 
        val = I915_READ(SPLL_CTL);
        I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
@@ -509,15 +512,18 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
                                       struct intel_dpll_hw_state *hw_state)
 {
        const enum intel_dpll_id id = pll->info->id;
-       uint32_t val;
+       intel_wakeref_t wakeref;
+       u32 val;
 
-       if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+       wakeref = intel_display_power_get_if_enabled(dev_priv,
+                                                    POWER_DOMAIN_PLLS);
+       if (!wakeref)
                return false;
 
        val = I915_READ(WRPLL_CTL(id));
        hw_state->wrpll = val;
 
-       intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+       intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
 
        return val & WRPLL_PLL_ENABLE;
 }
@@ -526,15 +532,18 @@ static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
                                      struct intel_shared_dpll *pll,
                                      struct intel_dpll_hw_state *hw_state)
 {
-       uint32_t val;
+       intel_wakeref_t wakeref;
+       u32 val;
 
-       if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+       wakeref = intel_display_power_get_if_enabled(dev_priv,
+                                                    POWER_DOMAIN_PLLS);
+       if (!wakeref)
                return false;
 
        val = I915_READ(SPLL_CTL);
        hw_state->spll = val;
 
-       intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+       intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
 
        return val & SPLL_PLL_ENABLE;
 }
@@ -630,11 +639,12 @@ static unsigned hsw_wrpll_get_budget_for_freq(int clock)
        return budget;
 }
 
-static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget,
-                                unsigned r2, unsigned n2, unsigned p,
+static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
+                                unsigned int r2, unsigned int n2,
+                                unsigned int p,
                                 struct hsw_wrpll_rnp *best)
 {
-       uint64_t a, b, c, d, diff, diff_best;
+       u64 a, b, c, d, diff, diff_best;
 
        /* No best (r,n,p) yet */
        if (best->p == 0) {
@@ -693,7 +703,7 @@ static void
 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
                        unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
 {
-       uint64_t freq2k;
+       u64 freq2k;
        unsigned p, n2, r2;
        struct hsw_wrpll_rnp best = { 0, 0, 0 };
        unsigned budget;
@@ -759,7 +769,7 @@ static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock,
                                                       struct intel_crtc_state *crtc_state)
 {
        struct intel_shared_dpll *pll;
-       uint32_t val;
+       u32 val;
        unsigned int p, n2, r2;
 
        hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
@@ -921,7 +931,7 @@ static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
                                    struct intel_shared_dpll *pll)
 {
        const enum intel_dpll_id id = pll->info->id;
-       uint32_t val;
+       u32 val;
 
        val = I915_READ(DPLL_CTRL1);
 
@@ -986,12 +996,15 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
                                     struct intel_shared_dpll *pll,
                                     struct intel_dpll_hw_state *hw_state)
 {
-       uint32_t val;
+       u32 val;
        const struct skl_dpll_regs *regs = skl_dpll_regs;
        const enum intel_dpll_id id = pll->info->id;
+       intel_wakeref_t wakeref;
        bool ret;
 
-       if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+       wakeref = intel_display_power_get_if_enabled(dev_priv,
+                                                    POWER_DOMAIN_PLLS);
+       if (!wakeref)
                return false;
 
        ret = false;
@@ -1011,7 +1024,7 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
        ret = true;
 
 out:
-       intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+       intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
 
        return ret;
 }
@@ -1020,12 +1033,15 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
                                       struct intel_shared_dpll *pll,
                                       struct intel_dpll_hw_state *hw_state)
 {
-       uint32_t val;
        const struct skl_dpll_regs *regs = skl_dpll_regs;
        const enum intel_dpll_id id = pll->info->id;
+       intel_wakeref_t wakeref;
+       u32 val;
        bool ret;
 
-       if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+       wakeref = intel_display_power_get_if_enabled(dev_priv,
+                                                    POWER_DOMAIN_PLLS);
+       if (!wakeref)
                return false;
 
        ret = false;
@@ -1041,15 +1057,15 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
        ret = true;
 
 out:
-       intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+       intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
 
        return ret;
 }
 
 struct skl_wrpll_context {
-       uint64_t min_deviation;         /* current minimal deviation */
-       uint64_t central_freq;          /* chosen central freq */
-       uint64_t dco_freq;              /* chosen dco freq */
+       u64 min_deviation;              /* current minimal deviation */
+       u64 central_freq;               /* chosen central freq */
+       u64 dco_freq;                   /* chosen dco freq */
        unsigned int p;                 /* chosen divider */
 };
 
@@ -1065,11 +1081,11 @@ static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
 #define SKL_DCO_MAX_NDEVIATION 600
 
 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
-                                 uint64_t central_freq,
-                                 uint64_t dco_freq,
+                                 u64 central_freq,
+                                 u64 dco_freq,
                                  unsigned int divider)
 {
-       uint64_t deviation;
+       u64 deviation;
 
        deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
                              central_freq);
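
        /*
         * Worked example: for central_freq = 8.4 GHz and a candidate
         * dco_freq = 8.1 GHz, deviation = 10000 * 300000000 / 8400000000
         * = 357, i.e. 3.57%.  Since the DCO sits below the central
         * frequency this is checked against SKL_DCO_MAX_NDEVIATION
         * (600, i.e. 6%), so the divider would be accepted if it also
         * beats the best deviation seen so far.
         */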
@@ -1143,21 +1159,21 @@ static void skl_wrpll_get_multipliers(unsigned int p,
 }
 
 struct skl_wrpll_params {
-       uint32_t        dco_fraction;
-       uint32_t        dco_integer;
-       uint32_t        qdiv_ratio;
-       uint32_t        qdiv_mode;
-       uint32_t        kdiv;
-       uint32_t        pdiv;
-       uint32_t        central_freq;
+       u32 dco_fraction;
+       u32 dco_integer;
+       u32 qdiv_ratio;
+       u32 qdiv_mode;
+       u32 kdiv;
+       u32 pdiv;
+       u32 central_freq;
 };
 
 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
-                                     uint64_t afe_clock,
-                                     uint64_t central_freq,
-                                     uint32_t p0, uint32_t p1, uint32_t p2)
+                                     u64 afe_clock,
+                                     u64 central_freq,
+                                     u32 p0, u32 p1, u32 p2)
 {
-       uint64_t dco_freq;
+       u64 dco_freq;
 
        switch (central_freq) {
        case 9600000000ULL:
@@ -1223,10 +1239,10 @@ static bool
 skl_ddi_calculate_wrpll(int clock /* in Hz */,
                        struct skl_wrpll_params *wrpll_params)
 {
-       uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
-       uint64_t dco_central_freq[3] = {8400000000ULL,
-                                       9000000000ULL,
-                                       9600000000ULL};
+       u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
+       u64 dco_central_freq[3] = { 8400000000ULL,
+                                   9000000000ULL,
+                                   9600000000ULL };
        static const int even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
                                             24, 28, 30, 32, 36, 40, 42, 44,
                                             48, 52, 54, 56, 60, 64, 66, 68,
@@ -1250,7 +1266,7 @@ skl_ddi_calculate_wrpll(int clock /* in Hz */,
                for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
                        for (i = 0; i < dividers[d].n_dividers; i++) {
                                unsigned int p = dividers[d].list[i];
-                               uint64_t dco_freq = p * afe_clock;
+                               u64 dco_freq = p * afe_clock;
 
                                skl_wrpll_try_divider(&ctx,
                                                      dco_central_freq[dco],
@@ -1296,7 +1312,7 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
                                      struct intel_crtc_state *crtc_state,
                                      int clock)
 {
-       uint32_t ctrl1, cfgcr1, cfgcr2;
+       u32 ctrl1, cfgcr1, cfgcr2;
        struct skl_wrpll_params wrpll_params = { 0, };
 
        /*
@@ -1333,7 +1349,7 @@ static bool
 skl_ddi_dp_set_dpll_hw_state(int clock,
                             struct intel_dpll_hw_state *dpll_hw_state)
 {
-       uint32_t ctrl1;
+       u32 ctrl1;
 
        /*
         * See comment in intel_dpll_hw_state to understand why we always use 0
@@ -1435,7 +1451,7 @@ static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
 static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
                                struct intel_shared_dpll *pll)
 {
-       uint32_t temp;
+       u32 temp;
        enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
        enum dpio_phy phy;
        enum dpio_channel ch;
@@ -1556,7 +1572,7 @@ static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
                                        struct intel_shared_dpll *pll)
 {
        enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
-       uint32_t temp;
+       u32 temp;
 
        temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
        temp &= ~PORT_PLL_ENABLE;
@@ -1579,14 +1595,17 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
                                        struct intel_dpll_hw_state *hw_state)
 {
        enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
-       uint32_t val;
-       bool ret;
+       intel_wakeref_t wakeref;
        enum dpio_phy phy;
        enum dpio_channel ch;
+       u32 val;
+       bool ret;
 
        bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
 
-       if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+       wakeref = intel_display_power_get_if_enabled(dev_priv,
+                                                    POWER_DOMAIN_PLLS);
+       if (!wakeref)
                return false;
 
        ret = false;
@@ -1643,7 +1662,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
        ret = true;
 
 out:
-       intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+       intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
 
        return ret;
 }
@@ -1651,12 +1670,12 @@ out:
 /* bxt clock parameters */
 struct bxt_clk_div {
        int clock;
-       uint32_t p1;
-       uint32_t p2;
-       uint32_t m2_int;
-       uint32_t m2_frac;
+       u32 p1;
+       u32 p2;
+       u32 m2_int;
+       u32 m2_frac;
        bool m2_frac_en;
-       uint32_t n;
+       u32 n;
 
        int vco;
 };
@@ -1723,8 +1742,8 @@ static bool bxt_ddi_set_dpll_hw_state(int clock,
                          struct intel_dpll_hw_state *dpll_hw_state)
 {
        int vco = clk_div->vco;
-       uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
-       uint32_t lanestagger;
+       u32 prop_coef, int_coef, gain_ctl, targ_cnt;
+       u32 lanestagger;
 
        if (vco >= 6200000 && vco <= 6700000) {
                prop_coef = 4;
@@ -1873,7 +1892,7 @@ static void intel_ddi_pll_init(struct drm_device *dev)
        struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (INTEL_GEN(dev_priv) < 9) {
-               uint32_t val = I915_READ(LCPLL_CTL);
+               u32 val = I915_READ(LCPLL_CTL);
 
                /*
                 * The LCPLL register should be turned on by the BIOS. For now
@@ -1959,7 +1978,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
                               struct intel_shared_dpll *pll)
 {
        const enum intel_dpll_id id = pll->info->id;
-       uint32_t val;
+       u32 val;
 
        /* 1. Enable DPLL power in DPLL_ENABLE. */
        val = I915_READ(CNL_DPLL_ENABLE(id));
@@ -2034,7 +2053,7 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
                                struct intel_shared_dpll *pll)
 {
        const enum intel_dpll_id id = pll->info->id;
-       uint32_t val;
+       u32 val;
 
        /*
         * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
@@ -2091,10 +2110,13 @@ static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
                                     struct intel_dpll_hw_state *hw_state)
 {
        const enum intel_dpll_id id = pll->info->id;
-       uint32_t val;
+       intel_wakeref_t wakeref;
+       u32 val;
        bool ret;
 
-       if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+       wakeref = intel_display_power_get_if_enabled(dev_priv,
+                                                    POWER_DOMAIN_PLLS);
+       if (!wakeref)
                return false;
 
        ret = false;
@@ -2113,7 +2135,7 @@ static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
        ret = true;
 
 out:
-       intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+       intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
 
        return ret;
 }
@@ -2225,7 +2247,7 @@ cnl_ddi_calculate_wrpll(int clock,
                        struct skl_wrpll_params *wrpll_params)
 {
        u32 afe_clock = clock * 5;
-       uint32_t ref_clock;
+       u32 ref_clock;
        u32 dco_min = 7998000;
        u32 dco_max = 10000000;
        u32 dco_mid = (dco_min + dco_max) / 2;
@@ -2271,7 +2293,7 @@ static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
                                      int clock)
 {
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       uint32_t cfgcr0, cfgcr1;
+       u32 cfgcr0, cfgcr1;
        struct skl_wrpll_params wrpll_params = { 0, };
 
        cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
@@ -2300,7 +2322,7 @@ static bool
 cnl_ddi_dp_set_dpll_hw_state(int clock,
                             struct intel_dpll_hw_state *dpll_hw_state)
 {
-       uint32_t cfgcr0;
+       u32 cfgcr0;
 
        cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;
 
@@ -2517,7 +2539,7 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
                                struct intel_dpll_hw_state *pll_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       uint32_t cfgcr0, cfgcr1;
+       u32 cfgcr0, cfgcr1;
        struct skl_wrpll_params pll_params = { 0 };
        bool ret;
 
@@ -2547,10 +2569,10 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
 }
 
 int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
-                              uint32_t pll_id)
+                              u32 pll_id)
 {
-       uint32_t cfgcr0, cfgcr1;
-       uint32_t pdiv, kdiv, qdiv_mode, qdiv_ratio, dco_integer, dco_fraction;
+       u32 cfgcr0, cfgcr1;
+       u32 pdiv, kdiv, qdiv_mode, qdiv_ratio, dco_integer, dco_fraction;
        const struct skl_wrpll_params *params;
        int index, n_entries, link_clock;
 
@@ -2617,14 +2639,14 @@ int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
        return link_clock;
 }
 
-static enum port icl_mg_pll_id_to_port(enum intel_dpll_id id)
+static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
 {
-       return id - DPLL_ID_ICL_MGPLL1 + PORT_C;
+       return id - DPLL_ID_ICL_MGPLL1;
 }
 
-enum intel_dpll_id icl_port_to_mg_pll_id(enum port port)
+enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
 {
-       return port - PORT_C + DPLL_ID_ICL_MGPLL1;
+       return tc_port + DPLL_ID_ICL_MGPLL1;
 }
 
 bool intel_dpll_is_combophy(enum intel_dpll_id id)
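
The renamed helpers above index MG PLLs by Type-C port instead of by enum
port: with DPLL_ID_ICL_MGPLL1 as the base, tc_port 0 maps to MGPLL1,
tc_port 1 to MGPLL2, and so on, making the two functions exact inverses.
A round-trip check (enum values here are illustrative, not the kernel's):

    #include <assert.h>

    enum intel_dpll_id { DPLL_ID_ICL_MGPLL1 = 3 };  /* illustrative value */
    enum tc_port { PORT_TC1 = 0, PORT_TC2, PORT_TC3, PORT_TC4 };

    static enum tc_port pll_id_to_tc_port(enum intel_dpll_id id)
    {
            return id - DPLL_ID_ICL_MGPLL1;  /* MGPLL1 <-> first TC port */
    }

    static enum intel_dpll_id tc_port_to_pll_id(enum tc_port tc)
    {
            return tc + DPLL_ID_ICL_MGPLL1;
    }

    int main(void)
    {
            /* the helpers invert each other by construction */
            assert(pll_id_to_tc_port(tc_port_to_pll_id(PORT_TC2)) == PORT_TC2);
            return 0;
    }
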
@@ -2633,10 +2655,10 @@ bool intel_dpll_is_combophy(enum intel_dpll_id id)
 }
 
 static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
-                                    uint32_t *target_dco_khz,
+                                    u32 *target_dco_khz,
                                     struct intel_dpll_hw_state *state)
 {
-       uint32_t dco_min_freq, dco_max_freq;
+       u32 dco_min_freq, dco_max_freq;
        int div1_vals[] = {7, 5, 3, 2};
        unsigned int i;
        int div2;
@@ -2712,12 +2734,12 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        int refclk_khz = dev_priv->cdclk.hw.ref;
-       uint32_t dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
-       uint32_t iref_ndiv, iref_trim, iref_pulse_w;
-       uint32_t prop_coeff, int_coeff;
-       uint32_t tdc_targetcnt, feedfwgain;
-       uint64_t ssc_stepsize, ssc_steplen, ssc_steplog;
-       uint64_t tmp;
+       u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
+       u32 iref_ndiv, iref_trim, iref_pulse_w;
+       u32 prop_coeff, int_coeff;
+       u32 tdc_targetcnt, feedfwgain;
+       u64 ssc_stepsize, ssc_steplen, ssc_steplog;
+       u64 tmp;
        bool use_ssc = false;
        bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
 
@@ -2740,7 +2762,7 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
        }
        m2div_rem = dco_khz % (refclk_khz * m1div);
 
-       tmp = (uint64_t)m2div_rem * (1 << 22);
+       tmp = (u64)m2div_rem * (1 << 22);
        do_div(tmp, refclk_khz * m1div);
        m2div_frac = tmp;
 
@@ -2799,11 +2821,11 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
        }
 
        if (use_ssc) {
-               tmp = (uint64_t)dco_khz * 47 * 32;
+               tmp = (u64)dco_khz * 47 * 32;
                do_div(tmp, refclk_khz * m1div * 10000);
                ssc_stepsize = tmp;
 
-               tmp = (uint64_t)dco_khz * 1000;
+               tmp = (u64)dco_khz * 1000;
                ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
        } else {
                ssc_stepsize = 0;
@@ -2903,7 +2925,10 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
                        ret = icl_calc_dpll_state(crtc_state, encoder, clock,
                                                  &pll_state);
                } else {
-                       min = icl_port_to_mg_pll_id(port);
+                       enum tc_port tc_port;
+
+                       tc_port = intel_port_to_tc(dev_priv, port);
+                       min = icl_tc_port_to_pll_id(tc_port);
                        max = min;
                        ret = icl_calc_mg_pll_state(crtc_state, encoder, clock,
                                                    &pll_state);
@@ -2937,12 +2962,8 @@ static i915_reg_t icl_pll_id_to_enable_reg(enum intel_dpll_id id)
                return CNL_DPLL_ENABLE(id);
        else if (id == DPLL_ID_ICL_TBTPLL)
                return TBT_PLL_ENABLE;
-       else
-               /*
-                * TODO: Make MG_PLL macros use
-                * tc port id instead of port id
-                */
-               return MG_PLL_ENABLE(icl_mg_pll_id_to_port(id));
+
+       return MG_PLL_ENABLE(icl_pll_id_to_tc_port(id));
 }
 
 static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
@@ -2950,11 +2971,13 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
                                 struct intel_dpll_hw_state *hw_state)
 {
        const enum intel_dpll_id id = pll->info->id;
-       uint32_t val;
-       enum port port;
+       intel_wakeref_t wakeref;
        bool ret = false;
+       u32 val;
 
-       if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+       wakeref = intel_display_power_get_if_enabled(dev_priv,
+                                                    POWER_DOMAIN_PLLS);
+       if (!wakeref)
                return false;
 
        val = I915_READ(icl_pll_id_to_enable_reg(id));
@@ -2966,32 +2989,33 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
                hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
                hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
        } else {
-               port = icl_mg_pll_id_to_port(id);
-               hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(port));
+               enum tc_port tc_port = icl_pll_id_to_tc_port(id);
+
+               hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(tc_port));
                hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
 
                hw_state->mg_clktop2_coreclkctl1 =
-                       I915_READ(MG_CLKTOP2_CORECLKCTL1(port));
+                       I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
                hw_state->mg_clktop2_coreclkctl1 &=
                        MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
 
                hw_state->mg_clktop2_hsclkctl =
-                       I915_READ(MG_CLKTOP2_HSCLKCTL(port));
+                       I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
                hw_state->mg_clktop2_hsclkctl &=
                        MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
                        MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
                        MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
                        MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
 
-               hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(port));
-               hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(port));
-               hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(port));
-               hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(port));
-               hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(port));
+               hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
+               hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(tc_port));
+               hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(tc_port));
+               hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(tc_port));
+               hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(tc_port));
 
-               hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(port));
+               hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(tc_port));
                hw_state->mg_pll_tdc_coldst_bias =
-                       I915_READ(MG_PLL_TDC_COLDST_BIAS(port));
+                       I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
 
                if (dev_priv->cdclk.hw.ref == 38400) {
                        hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
@@ -3007,7 +3031,7 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
 
        ret = true;
 out:
-       intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+       intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
        return ret;
 }
 
@@ -3026,7 +3050,7 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
                             struct intel_shared_dpll *pll)
 {
        struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
-       enum port port = icl_mg_pll_id_to_port(pll->info->id);
+       enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
        u32 val;
 
        /*
@@ -3035,41 +3059,41 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
         * during the calc/readout phase if the mask depends on some other HW
         * state like refclk, see icl_calc_mg_pll_state().
         */
-       val = I915_READ(MG_REFCLKIN_CTL(port));
+       val = I915_READ(MG_REFCLKIN_CTL(tc_port));
        val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
        val |= hw_state->mg_refclkin_ctl;
-       I915_WRITE(MG_REFCLKIN_CTL(port), val);
+       I915_WRITE(MG_REFCLKIN_CTL(tc_port), val);
 
-       val = I915_READ(MG_CLKTOP2_CORECLKCTL1(port));
+       val = I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
        val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
        val |= hw_state->mg_clktop2_coreclkctl1;
-       I915_WRITE(MG_CLKTOP2_CORECLKCTL1(port), val);
+       I915_WRITE(MG_CLKTOP2_CORECLKCTL1(tc_port), val);
 
-       val = I915_READ(MG_CLKTOP2_HSCLKCTL(port));
+       val = I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
        val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
                 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
                 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
                 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
        val |= hw_state->mg_clktop2_hsclkctl;
-       I915_WRITE(MG_CLKTOP2_HSCLKCTL(port), val);
+       I915_WRITE(MG_CLKTOP2_HSCLKCTL(tc_port), val);
 
-       I915_WRITE(MG_PLL_DIV0(port), hw_state->mg_pll_div0);
-       I915_WRITE(MG_PLL_DIV1(port), hw_state->mg_pll_div1);
-       I915_WRITE(MG_PLL_LF(port), hw_state->mg_pll_lf);
-       I915_WRITE(MG_PLL_FRAC_LOCK(port), hw_state->mg_pll_frac_lock);
-       I915_WRITE(MG_PLL_SSC(port), hw_state->mg_pll_ssc);
+       I915_WRITE(MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
+       I915_WRITE(MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
+       I915_WRITE(MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
+       I915_WRITE(MG_PLL_FRAC_LOCK(tc_port), hw_state->mg_pll_frac_lock);
+       I915_WRITE(MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
 
-       val = I915_READ(MG_PLL_BIAS(port));
+       val = I915_READ(MG_PLL_BIAS(tc_port));
        val &= ~hw_state->mg_pll_bias_mask;
        val |= hw_state->mg_pll_bias;
-       I915_WRITE(MG_PLL_BIAS(port), val);
+       I915_WRITE(MG_PLL_BIAS(tc_port), val);
 
-       val = I915_READ(MG_PLL_TDC_COLDST_BIAS(port));
+       val = I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
        val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
        val |= hw_state->mg_pll_tdc_coldst_bias;
-       I915_WRITE(MG_PLL_TDC_COLDST_BIAS(port), val);
+       I915_WRITE(MG_PLL_TDC_COLDST_BIAS(tc_port), val);
 
-       POSTING_READ(MG_PLL_TDC_COLDST_BIAS(port));
+       POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
 }
 
 static void icl_pll_enable(struct drm_i915_private *dev_priv,
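
icl_mg_pll_write() above is a chain of masked read-modify-writes: each
register keeps its untouched bits and only the field covered by the mask is
replaced (the hw_state values were pre-masked at readout, as the comment at
the top of the function notes). The core pattern in isolation:

    #include <stdint.h>
    #include <stdio.h>

    /* Replace only the bits under 'mask', preserving everything else. */
    static uint32_t rmw(uint32_t reg, uint32_t mask, uint32_t bits)
    {
            reg &= ~mask;            /* clear the field */
            reg |= bits & mask;      /* install the new value */
            return reg;
    }

    int main(void)
    {
            /* expect 0xffff3400: only bits 8..15 change */
            printf("0x%08x\n", rmw(0xffff0000u, 0x0000ff00u, 0x00003400u));
            return 0;
    }
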
@@ -3077,7 +3101,7 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
 {
        const enum intel_dpll_id id = pll->info->id;
        i915_reg_t enable_reg = icl_pll_id_to_enable_reg(id);
-       uint32_t val;
+       u32 val;
 
        val = I915_READ(enable_reg);
        val |= PLL_POWER_ENABLE;
@@ -3118,7 +3142,7 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
 {
        const enum intel_dpll_id id = pll->info->id;
        i915_reg_t enable_reg = icl_pll_id_to_enable_reg(id);
-       uint32_t val;
+       u32 val;
 
        /* The first steps are done by intel_ddi_post_disable(). */
 
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index a033d8f06d4a80f726b13287067a6953b326f14d..40e8391a92f24dd2ec116e4950a01ceb962905b3 100644 (file)
@@ -138,14 +138,14 @@ enum intel_dpll_id {
 
 struct intel_dpll_hw_state {
        /* i9xx, pch plls */
-       uint32_t dpll;
-       uint32_t dpll_md;
-       uint32_t fp0;
-       uint32_t fp1;
+       u32 dpll;
+       u32 dpll_md;
+       u32 fp0;
+       u32 fp1;
 
        /* hsw, bdw */
-       uint32_t wrpll;
-       uint32_t spll;
+       u32 wrpll;
+       u32 spll;
 
        /* skl */
        /*
@@ -154,34 +154,33 @@ struct intel_dpll_hw_state {
         * the register.  This allows us to easily compare the state to share
         * the DPLL.
         */
-       uint32_t ctrl1;
+       u32 ctrl1;
        /* HDMI only, 0 when used for DP */
-       uint32_t cfgcr1, cfgcr2;
+       u32 cfgcr1, cfgcr2;
 
        /* cnl */
-       uint32_t cfgcr0;
+       u32 cfgcr0;
        /* CNL also uses cfgcr1 */
 
        /* bxt */
-       uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10,
-                pcsdw12;
+       u32 ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10, pcsdw12;
 
        /*
         * ICL uses the following, already defined:
-        * uint32_t cfgcr0, cfgcr1;
-        */
-       uint32_t mg_refclkin_ctl;
-       uint32_t mg_clktop2_coreclkctl1;
-       uint32_t mg_clktop2_hsclkctl;
-       uint32_t mg_pll_div0;
-       uint32_t mg_pll_div1;
-       uint32_t mg_pll_lf;
-       uint32_t mg_pll_frac_lock;
-       uint32_t mg_pll_ssc;
-       uint32_t mg_pll_bias;
-       uint32_t mg_pll_tdc_coldst_bias;
-       uint32_t mg_pll_bias_mask;
-       uint32_t mg_pll_tdc_coldst_bias_mask;
+        * u32 cfgcr0, cfgcr1;
+        */
+       u32 mg_refclkin_ctl;
+       u32 mg_clktop2_coreclkctl1;
+       u32 mg_clktop2_hsclkctl;
+       u32 mg_pll_div0;
+       u32 mg_pll_div1;
+       u32 mg_pll_lf;
+       u32 mg_pll_frac_lock;
+       u32 mg_pll_ssc;
+       u32 mg_pll_bias;
+       u32 mg_pll_tdc_coldst_bias;
+       u32 mg_pll_bias_mask;
+       u32 mg_pll_tdc_coldst_bias_mask;
 };
 
 /**
@@ -280,7 +279,7 @@ struct dpll_info {
         *     Inform the state checker that the DPLL is kept enabled even if
         *     not in use by any CRTC.
         */
-       uint32_t flags;
+       u32 flags;
 };
 
 /**
@@ -343,9 +342,9 @@ void intel_shared_dpll_init(struct drm_device *dev);
 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
                              struct intel_dpll_hw_state *hw_state);
 int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
-                              uint32_t pll_id);
+                              u32 pll_id);
 int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv);
-enum intel_dpll_id icl_port_to_mg_pll_id(enum port port);
+enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port);
 bool intel_dpll_is_combophy(enum intel_dpll_id id);
 
 #endif /* _INTEL_DPLL_MGR_H_ */
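
The invariant called out in the skl comment above is what makes DPLL
sharing cheap: every intel_dpll_hw_state field is stored exactly as it will
be written to its register, already masked, so deciding whether two CRTCs
can share a PLL reduces to a bitwise comparison of the two states. A toy
illustration (struct and values invented for the example; all-u32 members
mean memcmp sees no padding):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct dpll_hw_state {               /* invented, reduced example */
            uint32_t ctrl1;
            uint32_t cfgcr1, cfgcr2;
    };

    static int can_share_pll(const struct dpll_hw_state *a,
                             const struct dpll_hw_state *b)
    {
            return memcmp(a, b, sizeof(*a)) == 0;  /* no per-field logic */
    }

    int main(void)
    {
            struct dpll_hw_state a = { 0x1, 0x2, 0x3 }, b = a;
            printf("%d\n", can_share_pll(&a, &b));  /* prints 1 */
            return 0;
    }
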
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index f94a04b4ad8788bcc1d8f9ca3073eaa00f33648a..15db41394b9ed75d9de8545c8a0faff9efbc9a9b 100644 (file)
 #include <linux/i2c.h>
 #include <linux/hdmi.h>
 #include <linux/sched/clock.h>
+#include <linux/stackdepot.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_dp_dual_mode_helper.h>
 #include <drm/drm_dp_mst_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drm_rect.h>
+#include <drm/drm_vblank.h>
 #include <drm/drm_atomic.h>
 #include <media/cec-notifier.h>
 
+struct drm_printer;
+
 /**
  * __wait_for - magic wait macro
  *
@@ -209,6 +213,16 @@ struct intel_fbdev {
        unsigned long vma_flags;
        async_cookie_t cookie;
        int preferred_bpp;
+
+       /* Whether or not fbdev hpd processing is temporarily suspended */
+       bool hpd_suspended : 1;
+       /* Set when a hotplug was received while HPD processing was
+        * suspended
+        */
+       bool hpd_waiting : 1;
+
+       /* Protects hpd_suspended */
+       struct mutex hpd_lock;
 };
 
 struct intel_encoder {
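
The three new intel_fbdev fields above form a small deferred-hotplug state
machine: while hpd_suspended is set, an incoming hotplug only latches
hpd_waiting under hpd_lock, and resuming replays at most one deferred
event. A user-space model of the same logic, with a pthread mutex standing
in for hpd_lock:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t hpd_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool hpd_suspended, hpd_waiting;

    static void process_hotplug(void) { puts("hotplug processed"); }

    static void on_hotplug(void)
    {
            pthread_mutex_lock(&hpd_lock);
            if (hpd_suspended)
                    hpd_waiting = true;   /* remember it for resume */
            else
                    process_hotplug();
            pthread_mutex_unlock(&hpd_lock);
    }

    static void hpd_resume(void)
    {
            pthread_mutex_lock(&hpd_lock);
            hpd_suspended = false;
            if (hpd_waiting) {            /* replay the deferred event */
                    hpd_waiting = false;
                    process_hotplug();
            }
            pthread_mutex_unlock(&hpd_lock);
    }

    int main(void)
    {
            hpd_suspended = true;
            on_hotplug();                 /* deferred */
            hpd_resume();                 /* replayed once */
            return 0;
    }
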
@@ -222,9 +236,9 @@ struct intel_encoder {
        enum intel_output_type (*compute_output_type)(struct intel_encoder *,
                                                      struct intel_crtc_state *,
                                                      struct drm_connector_state *);
-       bool (*compute_config)(struct intel_encoder *,
-                              struct intel_crtc_state *,
-                              struct drm_connector_state *);
+       int (*compute_config)(struct intel_encoder *,
+                             struct intel_crtc_state *,
+                             struct drm_connector_state *);
        void (*pre_pll_enable)(struct intel_encoder *,
                               const struct intel_crtc_state *,
                               const struct drm_connector_state *);
@@ -243,6 +257,9 @@ struct intel_encoder {
        void (*post_pll_disable)(struct intel_encoder *,
                                 const struct intel_crtc_state *,
                                 const struct drm_connector_state *);
+       void (*update_pipe)(struct intel_encoder *,
+                           const struct intel_crtc_state *,
+                           const struct drm_connector_state *);
        /* Read out the current hw state of this connector, returning true if
         * the encoder is active. If the encoder is enabled it also set the pipe
         * it is connected to in the pipe parameter. */
@@ -294,13 +311,12 @@ struct intel_panel {
 
                /* Connector and platform specific backlight functions */
                int (*setup)(struct intel_connector *connector, enum pipe pipe);
-               uint32_t (*get)(struct intel_connector *connector);
-               void (*set)(const struct drm_connector_state *conn_state, uint32_t level);
+               u32 (*get)(struct intel_connector *connector);
+               void (*set)(const struct drm_connector_state *conn_state, u32 level);
                void (*disable)(const struct drm_connector_state *conn_state);
                void (*enable)(const struct intel_crtc_state *crtc_state,
                               const struct drm_connector_state *conn_state);
-               uint32_t (*hz_to_pwm)(struct intel_connector *connector,
-                                     uint32_t hz);
+               u32 (*hz_to_pwm)(struct intel_connector *connector, u32 hz);
                void (*power)(struct intel_connector *, bool enable);
        } backlight;
 };
@@ -592,7 +608,7 @@ struct intel_initial_plane_config {
 
 struct intel_scaler {
        int in_use;
-       uint32_t mode;
+       u32 mode;
 };
 
 struct intel_crtc_scaler_state {
@@ -624,13 +640,15 @@ struct intel_crtc_scaler_state {
 };
 
 /* drm_mode->private_flags */
-#define I915_MODE_FLAG_INHERITED 1
+#define I915_MODE_FLAG_INHERITED (1<<0)
 /* Flag to get scanline using frame time stamps */
 #define I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP (1<<1)
+/* Flag to use the scanline counter instead of the pixel counter */
+#define I915_MODE_FLAG_USE_SCANLINE_COUNTER (1<<2)
 
 struct intel_pipe_wm {
        struct intel_wm_level wm[5];
-       uint32_t linetime;
+       u32 linetime;
        bool fbc_wm_enabled;
        bool pipe_enabled;
        bool sprites_enabled;
@@ -646,7 +664,7 @@ struct skl_plane_wm {
 
 struct skl_pipe_wm {
        struct skl_plane_wm planes[I915_MAX_PLANES];
-       uint32_t linetime;
+       u32 linetime;
 };
 
 enum vlv_wm_level {
@@ -659,7 +677,7 @@ enum vlv_wm_level {
 struct vlv_wm_state {
        struct g4x_pipe_wm wm[NUM_VLV_WM_LEVELS];
        struct g4x_sr_wm sr[NUM_VLV_WM_LEVELS];
-       uint8_t num_levels;
+       u8 num_levels;
        bool cxsr;
 };
 
@@ -872,13 +890,13 @@ struct intel_crtc_state {
        /* Used by SDVO (and if we ever fix it, HDMI). */
        unsigned pixel_multiplier;
 
-       uint8_t lane_count;
+       u8 lane_count;
 
        /*
         * Used by platforms having DP/HDMI PHY with programmable lane
         * latency optimization.
         */
-       uint8_t lane_lat_optim_mask;
+       u8 lane_lat_optim_mask;
 
        /* minimum acceptable voltage level */
        u8 min_voltage_level;
@@ -922,7 +940,7 @@ struct intel_crtc_state {
        struct intel_crtc_wm_state wm;
 
        /* Gamma mode programmed on the pipe */
-       uint32_t gamma_mode;
+       u32 gamma_mode;
 
        /* bitmask of visible planes (enum plane_id) */
        u8 active_planes;
@@ -1008,7 +1026,7 @@ struct intel_plane {
        enum pipe pipe;
        bool has_fbc;
        bool has_ccs;
-       uint32_t frontbuffer_bit;
+       u32 frontbuffer_bit;
 
        struct {
                u32 base, cntl, size;
@@ -1074,7 +1092,6 @@ struct intel_hdmi {
        } dp_dual_mode;
        bool has_hdmi_sink;
        bool has_audio;
-       bool rgb_quant_range_selectable;
        struct intel_connector *attached_connector;
        struct cec_notifier *cec_notifier;
 };
@@ -1104,9 +1121,9 @@ enum link_m_n_set {
 
 struct intel_dp_compliance_data {
        unsigned long edid;
-       uint8_t video_pattern;
-       uint16_t hdisplay, vdisplay;
-       uint8_t bpc;
+       u8 video_pattern;
+       u16 hdisplay, vdisplay;
+       u8 bpc;
 };
 
 struct intel_dp_compliance {
@@ -1119,18 +1136,18 @@ struct intel_dp_compliance {
 
 struct intel_dp {
        i915_reg_t output_reg;
-       uint32_t DP;
+       u32 DP;
        int link_rate;
-       uint8_t lane_count;
-       uint8_t sink_count;
+       u8 lane_count;
+       u8 sink_count;
        bool link_mst;
        bool link_trained;
        bool has_audio;
        bool reset_link_params;
-       uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
-       uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
-       uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
-       uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
+       u8 dpcd[DP_RECEIVER_CAP_SIZE];
+       u8 psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
+       u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
+       u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
        u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];
        u8 fec_capable;
        /* source rates */
@@ -1150,7 +1167,7 @@ struct intel_dp {
        /* sink or branch descriptor */
        struct drm_dp_desc desc;
        struct drm_dp_aux aux;
-       uint8_t train_set[4];
+       u8 train_set[4];
        int panel_power_up_delay;
        int panel_power_down_delay;
        int panel_power_cycle_delay;
@@ -1192,14 +1209,13 @@ struct intel_dp {
        struct intel_dp_mst_encoder *mst_encoders[I915_MAX_PIPES];
        struct drm_dp_mst_topology_mgr mst_mgr;
 
-       uint32_t (*get_aux_clock_divider)(struct intel_dp *dp, int index);
+       u32 (*get_aux_clock_divider)(struct intel_dp *dp, int index);
        /*
         * This function returns the value we have to program the AUX_CTL
         * register with to kick off an AUX transaction.
         */
-       uint32_t (*get_aux_send_ctl)(struct intel_dp *dp,
-                                    int send_bytes,
-                                    uint32_t aux_clock_divider);
+       u32 (*get_aux_send_ctl)(struct intel_dp *dp, int send_bytes,
+                               u32 aux_clock_divider);
 
        i915_reg_t (*aux_ch_ctl_reg)(struct intel_dp *dp);
        i915_reg_t (*aux_ch_data_reg)(struct intel_dp *dp, int index);
@@ -1209,6 +1225,9 @@ struct intel_dp {
 
        /* Displayport compliance testing */
        struct intel_dp_compliance compliance;
+
+       /* Display stream compression testing */
+       bool force_dsc_en;
 };
 
 enum lspcon_vendor {
@@ -1230,10 +1249,11 @@ struct intel_digital_port {
        struct intel_lspcon lspcon;
        enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
        bool release_cl2_override;
-       uint8_t max_lanes;
+       u8 max_lanes;
        /* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. */
        enum aux_ch aux_ch;
        enum intel_display_power_domain ddi_io_power_domain;
+       bool tc_legacy_port:1;
        enum tc_port_type tc_type;
 
        void (*write_infoframe)(struct intel_encoder *encoder,
@@ -1464,8 +1484,8 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv);
 void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
 
 /* i915_irq.c */
-void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
+void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
 void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
 void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
 void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv);
@@ -1528,7 +1548,7 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
 void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
                                         struct intel_crtc_state *crtc_state);
 u32 bxt_signal_levels(struct intel_dp *intel_dp);
-uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
+u32 ddi_signal_levels(struct intel_dp *intel_dp);
 u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
 u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
                                 u8 voltage_swing);
@@ -1668,11 +1688,11 @@ void intel_cleanup_plane_fb(struct drm_plane *plane,
 int intel_plane_atomic_get_property(struct drm_plane *plane,
                                    const struct drm_plane_state *state,
                                    struct drm_property *property,
-                                   uint64_t *val);
+                                   u64 *val);
 int intel_plane_atomic_set_property(struct drm_plane *plane,
                                    struct drm_plane_state *state,
                                    struct drm_property *property,
-                                   uint64_t val);
+                                   u64 val);
 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
                                    struct drm_crtc_state *crtc_state,
                                    const struct intel_plane_state *old_plane_state,
@@ -1746,9 +1766,10 @@ static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
 
 u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
                        const struct intel_plane_state *plane_state);
+u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state);
 u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
                  const struct intel_plane_state *plane_state);
-u32 glk_color_ctl(const struct intel_plane_state *plane_state);
+u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state);
 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
                     int plane);
 int skl_check_plane_surface(struct intel_plane_state *plane_state);
@@ -1792,10 +1813,10 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg,
 bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
                             struct intel_connector *intel_connector);
 void intel_dp_set_link_params(struct intel_dp *intel_dp,
-                             int link_rate, uint8_t lane_count,
+                             int link_rate, u8 lane_count,
                              bool link_mst);
 int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
-                                           int link_rate, uint8_t lane_count);
+                                           int link_rate, u8 lane_count);
 void intel_dp_start_link_train(struct intel_dp *intel_dp);
 void intel_dp_stop_link_train(struct intel_dp *intel_dp);
 int intel_dp_retrain_link(struct intel_encoder *encoder,
@@ -1806,10 +1827,10 @@ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
                                           bool enable);
 void intel_dp_encoder_reset(struct drm_encoder *encoder);
 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
-void intel_dp_encoder_destroy(struct drm_encoder *encoder);
-bool intel_dp_compute_config(struct intel_encoder *encoder,
-                            struct intel_crtc_state *pipe_config,
-                            struct drm_connector_state *conn_state);
+void intel_dp_encoder_flush_work(struct drm_encoder *encoder);
+int intel_dp_compute_config(struct intel_encoder *encoder,
+                           struct intel_crtc_state *pipe_config,
+                           struct drm_connector_state *conn_state);
 bool intel_dp_is_edp(struct intel_dp *intel_dp);
 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
 enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
@@ -1827,7 +1848,7 @@ int intel_dp_max_lane_count(struct intel_dp *intel_dp);
 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
 void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
 void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
-uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
+u32 intel_dp_pack_aux(const u8 *src, int src_bytes);
 void intel_plane_destroy(struct drm_plane *plane);
 void intel_edp_drrs_enable(struct intel_dp *intel_dp,
                           const struct intel_crtc_state *crtc_state);
@@ -1840,24 +1861,24 @@ void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
 
 void
 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
-                                      uint8_t dp_train_pat);
+                                      u8 dp_train_pat);
 void
 intel_dp_set_signal_levels(struct intel_dp *intel_dp);
 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp);
-uint8_t
+u8
 intel_dp_voltage_max(struct intel_dp *intel_dp);
-uint8_t
-intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing);
+u8
+intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing);
 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
-                          uint8_t *link_bw, uint8_t *rate_select);
+                          u8 *link_bw, u8 *rate_select);
 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
 bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
 bool
-intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]);
-uint16_t intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count,
-                                    int mode_clock, int mode_hdisplay);
-uint8_t intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
-                                    int mode_hdisplay);
+intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE]);
+u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
+                               int mode_clock, int mode_hdisplay);
+u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
+                               int mode_hdisplay);
 
 /* intel_vdsc.c */
 int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
@@ -1874,6 +1895,8 @@ bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
 int intel_dp_link_required(int pixel_clock, int bpp);
 int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
 bool intel_digital_port_connected(struct intel_encoder *encoder);
+void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
+                          struct intel_digital_port *dig_port);
 
 /* intel_dp_aux_backlight.c */
 int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
@@ -1967,9 +1990,9 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg,
 void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
                               struct intel_connector *intel_connector);
 struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
-bool intel_hdmi_compute_config(struct intel_encoder *encoder,
-                              struct intel_crtc_state *pipe_config,
-                              struct drm_connector_state *conn_state);
+int intel_hdmi_compute_config(struct intel_encoder *encoder,
+                             struct intel_crtc_state *pipe_config,
+                             struct drm_connector_state *conn_state);
 bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
                                       struct drm_connector *connector,
                                       bool high_tmds_clock_ratio,
@@ -2014,6 +2037,9 @@ int intel_panel_setup_backlight(struct drm_connector *connector,
                                enum pipe pipe);
 void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
                                  const struct drm_connector_state *conn_state);
+void intel_panel_update_backlight(struct intel_encoder *encoder,
+                                 const struct intel_crtc_state *crtc_state,
+                                 const struct drm_connector_state *conn_state);
 void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
 extern struct drm_display_mode *intel_find_panel_downclock(
                                struct drm_i915_private *dev_priv,
@@ -2075,6 +2101,7 @@ bool intel_psr_enabled(struct intel_dp *intel_dp);
 void intel_init_quirks(struct drm_i915_private *dev_priv);
 
 /* intel_runtime_pm.c */
+void intel_runtime_pm_init_early(struct drm_i915_private *dev_priv);
 int intel_power_domains_init(struct drm_i915_private *);
 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
@@ -2097,6 +2124,7 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
 void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_disable(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_cleanup(struct drm_i915_private *dev_priv);
 const char *
 intel_display_power_domain_str(enum intel_display_power_domain domain);
 
@@ -2104,33 +2132,42 @@ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
                                    enum intel_display_power_domain domain);
 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
                                      enum intel_display_power_domain domain);
-void intel_display_power_get(struct drm_i915_private *dev_priv,
-                            enum intel_display_power_domain domain);
-bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
                                        enum intel_display_power_domain domain);
+intel_wakeref_t
+intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+                                  enum intel_display_power_domain domain);
+void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
+                                      enum intel_display_power_domain domain);
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
 void intel_display_power_put(struct drm_i915_private *dev_priv,
-                            enum intel_display_power_domain domain);
+                            enum intel_display_power_domain domain,
+                            intel_wakeref_t wakeref);
+#else
+#define intel_display_power_put(i915, domain, wakeref) \
+       intel_display_power_put_unchecked(i915, domain)
+#endif
 void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
                            u8 req_slices);
 
 static inline void
-assert_rpm_device_not_suspended(struct drm_i915_private *dev_priv)
+assert_rpm_device_not_suspended(struct drm_i915_private *i915)
 {
-       WARN_ONCE(dev_priv->runtime_pm.suspended,
+       WARN_ONCE(i915->runtime_pm.suspended,
                  "Device suspended during HW access\n");
 }
 
 static inline void
-assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
+assert_rpm_wakelock_held(struct drm_i915_private *i915)
 {
-       assert_rpm_device_not_suspended(dev_priv);
-       WARN_ONCE(!atomic_read(&dev_priv->runtime_pm.wakeref_count),
+       assert_rpm_device_not_suspended(i915);
+       WARN_ONCE(!atomic_read(&i915->runtime_pm.wakeref_count),
                  "RPM wakelock ref not held during HW access");
 }
 
 /**
  * disable_rpm_wakeref_asserts - disable the RPM assert checks
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
  *
  * This function disable asserts that check if we hold an RPM wakelock
  * reference, while keeping the device-not-suspended checks still enabled.
@@ -2147,14 +2184,14 @@ assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
  * enable_rpm_wakeref_asserts().
  */
 static inline void
-disable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
+disable_rpm_wakeref_asserts(struct drm_i915_private *i915)
 {
-       atomic_inc(&dev_priv->runtime_pm.wakeref_count);
+       atomic_inc(&i915->runtime_pm.wakeref_count);
 }
 
 /**
  * enable_rpm_wakeref_asserts - re-enable the RPM assert checks
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
  *
  * This function re-enables the RPM assert checks after disabling them with
  * disable_rpm_wakeref_asserts. It's meant to be used only in special
@@ -2164,15 +2201,39 @@ disable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
  * disable_rpm_wakeref_asserts().
  */
 static inline void
-enable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
+enable_rpm_wakeref_asserts(struct drm_i915_private *i915)
 {
-       atomic_dec(&dev_priv->runtime_pm.wakeref_count);
+       atomic_dec(&i915->runtime_pm.wakeref_count);
 }
 
-void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
-bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
+intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915);
+intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915);
+intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915);
+
+#define with_intel_runtime_pm(i915, wf) \
+       for ((wf) = intel_runtime_pm_get(i915); (wf); \
+            intel_runtime_pm_put((i915), (wf)), (wf) = 0)
+
+#define with_intel_runtime_pm_if_in_use(i915, wf) \
+       for ((wf) = intel_runtime_pm_get_if_in_use(i915); (wf); \
+            intel_runtime_pm_put((i915), (wf)), (wf) = 0)
+
+void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915);
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref);
+#else
+#define intel_runtime_pm_put(i915, wref) intel_runtime_pm_put_unchecked(i915)
+#endif
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
+                                   struct drm_printer *p);
+#else
+static inline void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
+                                                 struct drm_printer *p)
+{
+}
+#endif
 
 void chv_phy_powergate_lanes(struct intel_encoder *encoder,
                             bool override, unsigned int mask);
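
The with_intel_runtime_pm() macros above pack the get/put pair into a
for-loop header so the wakeref is scoped to the block and cannot leak past
it: the body runs once while wf is non-zero, then the increment clause puts
the reference and zeroes wf to terminate the loop. A self-contained replica
of the trick (rpm_get/rpm_put are stand-ins):

    #include <stdio.h>

    typedef unsigned long wakeref_t;

    static wakeref_t rpm_get(void)    { puts("get"); return 1; }
    static void rpm_put(wakeref_t wf) { (void)wf; puts("put"); }

    /* same shape as with_intel_runtime_pm() in the hunk above */
    #define with_rpm(wf) \
            for ((wf) = rpm_get(); (wf); rpm_put(wf), (wf) = 0)

    int main(void)
    {
            wakeref_t wf;

            with_rpm(wf)
                    puts("hardware access here");  /* runs exactly once */

            return 0;
    }
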
@@ -2200,16 +2261,16 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv);
 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
 void gen6_rps_idle(struct drm_i915_private *dev_priv);
 void gen6_rps_boost(struct i915_request *rq, struct intel_rps_client *rps);
-void g4x_wm_get_hw_state(struct drm_device *dev);
-void vlv_wm_get_hw_state(struct drm_device *dev);
-void ilk_wm_get_hw_state(struct drm_device *dev);
-void skl_wm_get_hw_state(struct drm_device *dev);
+void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
+void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
+void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
+void skl_wm_get_hw_state(struct drm_i915_private *dev_priv);
 void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
                               struct skl_ddb_entry *ddb_y,
                               struct skl_ddb_entry *ddb_uv);
 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
                          struct skl_ddb_allocation *ddb /* out */);
-void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
+void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
                              struct skl_pipe_wm *out);
 void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
 void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
@@ -2278,11 +2339,11 @@ void intel_tv_init(struct drm_i915_private *dev_priv);
 int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
                                                const struct drm_connector_state *state,
                                                struct drm_property *property,
-                                               uint64_t *val);
+                                               u64 *val);
 int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
                                                struct drm_connector_state *state,
                                                struct drm_property *property,
-                                               uint64_t val);
+                                               u64 val);
 int intel_digital_connector_atomic_check(struct drm_connector *conn,
                                         struct drm_connector_state *new_state);
 struct drm_connector_state *
@@ -2327,10 +2388,10 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
                                        struct intel_plane_state *intel_state);
 
 /* intel_color.c */
-void intel_color_init(struct drm_crtc *crtc);
-int intel_color_check(struct drm_crtc *crtc, struct drm_crtc_state *state);
-void intel_color_set_csc(struct drm_crtc_state *crtc_state);
-void intel_color_load_luts(struct drm_crtc_state *crtc_state);
+void intel_color_init(struct intel_crtc *crtc);
+int intel_color_check(struct intel_crtc_state *crtc_state);
+void intel_color_commit(const struct intel_crtc_state *crtc_state);
+void intel_color_load_luts(const struct intel_crtc_state *crtc_state);
 
 /* intel_lspcon.c */
 bool lspcon_init(struct intel_digital_port *intel_dig_port);
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index d968f1f13e0923b7793d5ed944bc9a1fc5049c23..a9a19778dc7fc55c4b45996afa18516f6a8915a4 100644 (file)
@@ -24,7 +24,6 @@
 #ifndef _INTEL_DSI_H
 #define _INTEL_DSI_H
 
-#include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_mipi_dsi.h>
 #include "intel_drv.h"
@@ -40,6 +39,7 @@ struct intel_dsi {
        struct intel_encoder base;
 
        struct intel_dsi_host *dsi_hosts[I915_MAX_PORTS];
+       intel_wakeref_t io_wakeref[I915_MAX_PORTS];
 
        /* GPIO Desc for CRC based Panel control */
        struct gpio_desc *gpio_panel;
@@ -173,7 +173,7 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
 void vlv_dsi_pll_enable(struct intel_encoder *encoder,
                        const struct intel_crtc_state *config);
 void vlv_dsi_pll_disable(struct intel_encoder *encoder);
-u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
                     struct intel_crtc_state *config);
 void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
 
@@ -183,7 +183,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
 void bxt_dsi_pll_enable(struct intel_encoder *encoder,
                        const struct intel_crtc_state *config);
 void bxt_dsi_pll_disable(struct intel_encoder *encoder);
-u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
                     struct intel_crtc_state *config);
 void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
 
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index a1a8b3790e616261c73383a22a1a08724f1d6f64..06a11c35a784fc7957227792fd46474d960cfc81 100644 (file)
  *
  */
 
-#include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
 #include <drm/i915_drm.h>
 #include <linux/gpio/consumer.h>
+#include <linux/mfd/intel_soc_pmic.h>
 #include <linux/slab.h>
 #include <video/mipi_display.h>
 #include <asm/intel-mid.h>
-#include <video/mipi_display.h>
+#include <asm/unaligned.h>
 #include "i915_drv.h"
 #include "intel_drv.h"
 #include "intel_dsi.h"
@@ -393,7 +393,25 @@ static const u8 *mipi_exec_spi(struct intel_dsi *intel_dsi, const u8 *data)
 
 static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data)
 {
-       DRM_DEBUG_KMS("Skipping PMIC element execution\n");
+#ifdef CONFIG_PMIC_OPREGION
+       u32 value, mask, reg_address;
+       u16 i2c_address;
+       int ret;
+
+       /* byte 0 aka PMIC Flag is reserved */
+       i2c_address     = get_unaligned_le16(data + 1);
+       reg_address     = get_unaligned_le32(data + 3);
+       value           = get_unaligned_le32(data + 7);
+       mask            = get_unaligned_le32(data + 11);
+
+       ret = intel_soc_pmic_exec_mipi_pmic_seq_element(i2c_address,
+                                                       reg_address,
+                                                       value, mask);
+       if (ret)
+               DRM_ERROR("%s failed, error: %d\n", __func__, ret);
+#else
+       DRM_ERROR("Your hardware requires CONFIG_PMIC_OPREGION and it is not set\n");
+#endif
 
        return data + 15;
 }
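
The new mipi_exec_pmic() above decodes a fixed 15-byte sequence element:
one reserved flag byte, a little-endian u16 i2c address, then three
little-endian u32s (register address, value, mask), which is why the
function unconditionally returns data + 15. A stand-alone decoder for the
same layout (sample bytes invented):

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t le16(const uint8_t *p) { return p[0] | (uint16_t)p[1] << 8; }
    static uint32_t le32(const uint8_t *p)
    {
            return p[0] | (uint32_t)p[1] << 8 |
                   (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
    }

    int main(void)
    {
            /* byte 0: flag (reserved); 1..2: i2c addr; 3..6: reg;
             * 7..10: value; 11..14: mask; 15 bytes per element */
            const uint8_t elem[15] = {
                    0x00, 0x34, 0x12,            /* i2c addr 0x1234 */
                    0x78, 0x56, 0x00, 0x00,      /* reg 0x5678      */
                    0x01, 0x00, 0x00, 0x00,      /* value 1         */
                    0xff, 0x00, 0x00, 0x00,      /* mask 0xff       */
            };

            printf("i2c %#x reg %#x val %#x mask %#x\n",
                   le16(elem + 1), le32(elem + 3),
                   le32(elem + 7), le32(elem + 11));
            return 0;
    }
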
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 0042a7f69387780f6f1d5c105ca9cae43d41713d..a6c82482a841be503a3c6fe8ada5ec99981928df 100644 (file)
@@ -26,7 +26,6 @@
  */
 #include <linux/i2c.h>
 #include <linux/slab.h>
-#include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include "intel_drv.h"
@@ -235,9 +234,9 @@ intel_dvo_mode_valid(struct drm_connector *connector,
        return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode);
 }
 
-static bool intel_dvo_compute_config(struct intel_encoder *encoder,
-                                    struct intel_crtc_state *pipe_config,
-                                    struct drm_connector_state *conn_state)
+static int intel_dvo_compute_config(struct intel_encoder *encoder,
+                                   struct intel_crtc_state *pipe_config,
+                                   struct drm_connector_state *conn_state)
 {
        struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
        const struct drm_display_mode *fixed_mode =
@@ -254,10 +253,11 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
                intel_fixed_panel_mode(fixed_mode, adjusted_mode);
 
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
-               return false;
+               return -EINVAL;
 
        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
-       return true;
+
+       return 0;
 }
 
 static void intel_dvo_pre_enable(struct intel_encoder *encoder,
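
intel_dvo_compute_config() is one instance of a tree-wide change in this
merge: the encoder ->compute_config() hooks switch from bool to int so a
failure carries a real errno back to the atomic core instead of collapsing
into false. Schematically (errno choice illustrative):

    #include <errno.h>
    #include <stdio.h>

    /* old: return false/true; new: return -errno/0 */
    static int compute_config(int has_dblscan)
    {
            if (has_dblscan)
                    return -EINVAL;  /* was: return false */
            return 0;                /* was: return true  */
    }

    int main(void)
    {
            printf("%d %d\n", compute_config(1), compute_config(0));
            return 0;
    }
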
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index ff5b7bc692ce95ed0d5a31c081e3a843be004b63..49fa43ff02ba09dc63db471b4219e6b4db385cbf 100644 (file)
@@ -25,6 +25,7 @@
 #include <drm/drm_print.h>
 
 #include "i915_drv.h"
+#include "i915_reset.h"
 #include "intel_ringbuffer.h"
 #include "intel_lrc.h"
 
@@ -261,6 +262,31 @@ static void __sprint_engine_name(char *name, const struct engine_info *info)
                         info->instance) >= INTEL_ENGINE_CS_MAX_NAME);
 }
 
+void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+       i915_reg_t hwstam;
+
+       /*
+        * Though they added more rings on g4x/ilk, they did not add
+        * per-engine HWSTAM until gen6.
+        */
+       if (INTEL_GEN(dev_priv) < 6 && engine->class != RENDER_CLASS)
+               return;
+
+       hwstam = RING_HWSTAM(engine->mmio_base);
+       if (INTEL_GEN(dev_priv) >= 3)
+               I915_WRITE(hwstam, mask);
+       else
+               I915_WRITE16(hwstam, mask);
+}
+
+static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
+{
+       /* Mask off all writes into the unknown HWSP */
+       intel_engine_set_hwsp_writemask(engine, ~0u);
+}
+
 static int
 intel_engine_setup(struct drm_i915_private *dev_priv,
                   enum intel_engine_id id)
@@ -312,6 +338,9 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
 
        ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
 
+       /* Scrub mmio state on takeover */
+       intel_engine_sanitize_mmio(engine);
+
        dev_priv->engine_class[info->class][info->instance] = engine;
        dev_priv->engine[id] = engine;
        return 0;
@@ -365,7 +394,7 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
                goto cleanup;
        }
 
-       device_info->num_rings = hweight32(mask);
+       RUNTIME_INFO(dev_priv)->num_rings = hweight32(mask);
 
        i915_check_and_clear_faults(dev_priv);
 
@@ -426,33 +455,9 @@ cleanup:
        return err;
 }
 
-void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
+void intel_engine_write_global_seqno(struct intel_engine_cs *engine, u32 seqno)
 {
-       struct drm_i915_private *dev_priv = engine->i915;
-
-       /* Our semaphore implementation is strictly monotonic (i.e. we proceed
-        * so long as the semaphore value in the register/page is greater
-        * than the sync value), so whenever we reset the seqno,
-        * so long as we reset the tracking semaphore value to 0, it will
-        * always be before the next request's seqno. If we don't reset
-        * the semaphore value, then when the seqno moves backwards all
-        * future waits will complete instantly (causing rendering corruption).
-        */
-       if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
-               I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
-               I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
-               if (HAS_VEBOX(dev_priv))
-                       I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
-       }
-
        intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
-       clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
-
-       /* After manually advancing the seqno, fake the interrupt in case
-        * there are any waiters for that seqno.
-        */
-       intel_engine_wakeup(engine);
-
        GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno);
 }
 
@@ -469,50 +474,67 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
        GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
        GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
 
-       execlists->queue_priority = INT_MIN;
+       execlists->queue_priority_hint = INT_MIN;
        execlists->queue = RB_ROOT_CACHED;
 }
 
-/**
- * intel_engines_setup_common - setup engine state not requiring hw access
- * @engine: Engine to setup.
- *
- * Initializes @engine@ structure members shared between legacy and execlists
- * submission modes which do not require hardware access.
- *
- * Typically done early in the submission mode specific engine setup stage.
- */
-void intel_engine_setup_common(struct intel_engine_cs *engine)
+static void cleanup_status_page(struct intel_engine_cs *engine)
 {
-       i915_timeline_init(engine->i915, &engine->timeline, engine->name);
-       i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE);
+       struct i915_vma *vma;
 
-       intel_engine_init_execlist(engine);
-       intel_engine_init_hangcheck(engine);
-       intel_engine_init_batch_pool(engine);
-       intel_engine_init_cmd_parser(engine);
+       /* Prevent writes into HWSP after returning the page to the system */
+       intel_engine_set_hwsp_writemask(engine, ~0u);
+
+       vma = fetch_and_zero(&engine->status_page.vma);
+       if (!vma)
+               return;
+
+       if (!HWS_NEEDS_PHYSICAL(engine->i915))
+               i915_vma_unpin(vma);
+
+       i915_gem_object_unpin_map(vma->obj);
+       __i915_gem_object_release_unless_active(vma->obj);
 }
 
-static void cleanup_status_page(struct intel_engine_cs *engine)
+static int pin_ggtt_status_page(struct intel_engine_cs *engine,
+                               struct i915_vma *vma)
 {
-       if (HWS_NEEDS_PHYSICAL(engine->i915)) {
-               void *addr = fetch_and_zero(&engine->status_page.page_addr);
+       unsigned int flags;
 
-               __free_page(virt_to_page(addr));
-       }
+       flags = PIN_GLOBAL;
+       if (!HAS_LLC(engine->i915))
+               /*
+                * On g33, we cannot place HWS above 256MiB, so
+                * restrict its pinning to the low mappable arena.
+                * Though this restriction is not documented for
+                * gen4, gen5, or byt, they also behave similarly
+                * and hang if the HWS is placed at the top of the
+                * GTT. To generalise, it appears that all !llc
+                * platforms have issues with us placing the HWS
+                * above the mappable region (even though we never
+                * actually map it).
+                */
+               flags |= PIN_MAPPABLE;
+       else
+               flags |= PIN_HIGH;
 
-       i915_vma_unpin_and_release(&engine->status_page.vma,
-                                  I915_VMA_RELEASE_MAP);
+       return i915_vma_pin(vma, 0, 0, flags);
 }
 
 static int init_status_page(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
-       unsigned int flags;
        void *vaddr;
        int ret;
 
+       /*
+        * Though the HWS register does support 36bit addresses, historically
+        * we have had hangs and corruption reported due to wild writes if
+        * the HWS is placed above 4G. We only allow objects to be allocated
+        * in GFP_DMA32 for i965, and no earlier physical address users had
+        * access to more than 4G.
+        */
        obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
        if (IS_ERR(obj)) {
                DRM_ERROR("Failed to allocate status page\n");
@@ -529,59 +551,67 @@ static int init_status_page(struct intel_engine_cs *engine)
                goto err;
        }
 
-       flags = PIN_GLOBAL;
-       if (!HAS_LLC(engine->i915))
-               /* On g33, we cannot place HWS above 256MiB, so
-                * restrict its pinning to the low mappable arena.
-                * Though this restriction is not documented for
-                * gen4, gen5, or byt, they also behave similarly
-                * and hang if the HWS is placed at the top of the
-                * GTT. To generalise, it appears that all !llc
-                * platforms have issues with us placing the HWS
-                * above the mappable region (even though we never
-                * actually map it).
-                */
-               flags |= PIN_MAPPABLE;
-       else
-               flags |= PIN_HIGH;
-       ret = i915_vma_pin(vma, 0, 0, flags);
-       if (ret)
-               goto err;
-
        vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
        if (IS_ERR(vaddr)) {
                ret = PTR_ERR(vaddr);
-               goto err_unpin;
+               goto err;
        }
 
+       engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE);
        engine->status_page.vma = vma;
-       engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
-       engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);
+
+       if (!HWS_NEEDS_PHYSICAL(engine->i915)) {
+               ret = pin_ggtt_status_page(engine, vma);
+               if (ret)
+                       goto err_unpin;
+       }
+
        return 0;
 
 err_unpin:
-       i915_vma_unpin(vma);
+       i915_gem_object_unpin_map(obj);
 err:
        i915_gem_object_put(obj);
        return ret;
 }
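
The page_addr -> addr rename in this function feeds the status-page accessors used throughout the rest of this patch (I915_GEM_HWS_PREEMPT, the CSB buffer, and so on). A minimal sketch of the reader side, paraphrasing the intel_ringbuffer.h helper rather than quoting it:

static inline u32
intel_read_status_page(const struct intel_engine_cs *engine, int reg)
{
        /* force a fresh load; the GPU writes this page behind our back */
        return READ_ONCE(engine->status_page.addr[reg]);
}

The write side (intel_write_status_page) additionally handles cacheline flushing on !llc platforms, which is omitted here.
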
 
-static int init_phys_status_page(struct intel_engine_cs *engine)
+/**
+ * intel_engine_setup_common - setup engine state not requiring hw access
+ * @engine: Engine to setup.
+ *
+ * Initializes @engine structure members shared between legacy and execlists
+ * submission modes which do not require hardware access.
+ *
+ * Typically done early in the submission-mode-specific engine setup stage.
+ */
+int intel_engine_setup_common(struct intel_engine_cs *engine)
 {
-       struct page *page;
+       int err;
 
-       /*
-        * Though the HWS register does support 36bit addresses, historically
-        * we have had hangs and corruption reported due to wild writes if
-        * the HWS is placed above 4G.
-        */
-       page = alloc_page(GFP_KERNEL | __GFP_DMA32 | __GFP_ZERO);
-       if (!page)
-               return -ENOMEM;
+       err = init_status_page(engine);
+       if (err)
+               return err;
+
+       err = i915_timeline_init(engine->i915,
+                                &engine->timeline,
+                                engine->name,
+                                engine->status_page.vma);
+       if (err)
+               goto err_hwsp;
 
-       engine->status_page.page_addr = page_address(page);
+       i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE);
+
+       intel_engine_init_breadcrumbs(engine);
+       intel_engine_init_execlist(engine);
+       intel_engine_init_hangcheck(engine);
+       intel_engine_init_batch_pool(engine);
+       intel_engine_init_cmd_parser(engine);
 
        return 0;
+
+err_hwsp:
+       cleanup_status_page(engine);
+       return err;
 }
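
Note that intel_engine_setup_common() now returns int (it was void), since status-page allocation and timeline init can fail. A sketch of an updated caller, assuming the call sites in the submission backends that are not part of this hunk:

err = intel_engine_setup_common(engine);
if (err)
        return err;
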
 
 static void __intel_context_unpin(struct i915_gem_context *ctx,
@@ -590,6 +620,56 @@ static void __intel_context_unpin(struct i915_gem_context *ctx,
        intel_context_unpin(to_intel_context(ctx, engine));
 }
 
+struct measure_breadcrumb {
+       struct i915_request rq;
+       struct i915_timeline timeline;
+       struct intel_ring ring;
+       u32 cs[1024];
+};
+
+static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
+{
+       struct measure_breadcrumb *frame;
+       int dw = -ENOMEM;
+
+       GEM_BUG_ON(!engine->i915->gt.scratch);
+
+       frame = kzalloc(sizeof(*frame), GFP_KERNEL);
+       if (!frame)
+               return -ENOMEM;
+
+       if (i915_timeline_init(engine->i915,
+                              &frame->timeline, "measure",
+                              engine->status_page.vma))
+               goto out_frame;
+
+       INIT_LIST_HEAD(&frame->ring.request_list);
+       frame->ring.timeline = &frame->timeline;
+       frame->ring.vaddr = frame->cs;
+       frame->ring.size = sizeof(frame->cs);
+       frame->ring.effective_size = frame->ring.size;
+       intel_ring_update_space(&frame->ring);
+
+       frame->rq.i915 = engine->i915;
+       frame->rq.engine = engine;
+       frame->rq.ring = &frame->ring;
+       frame->rq.timeline = &frame->timeline;
+
+       dw = i915_timeline_pin(&frame->timeline);
+       if (dw < 0)
+               goto out_timeline;
+
+       dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
+
+       i915_timeline_unpin(&frame->timeline);
+
+out_timeline:
+       i915_timeline_fini(&frame->timeline);
+out_frame:
+       kfree(frame);
+       return dw;
+}
+
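
The trick above is to avoid hard-coding the breadcrumb size: the helper emits a real fini-breadcrumb into a throwaway on-stack ring and counts the dwords actually written. The result is cached once per engine; simplified from intel_engine_init_common() further below:

ret = measure_breadcrumb_dw(engine);
if (ret < 0)
        return ret;                     /* timeline init/pin failed */
engine->emit_fini_breadcrumb_dw = ret;
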
 /**
  * intel_engine_init_common - initialize engine state which might require hw access
  * @engine: Engine to initialize.
@@ -632,21 +712,14 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
                }
        }
 
-       ret = intel_engine_init_breadcrumbs(engine);
-       if (ret)
+       ret = measure_breadcrumb_dw(engine);
+       if (ret < 0)
                goto err_unpin_preempt;
 
-       if (HWS_NEEDS_PHYSICAL(i915))
-               ret = init_phys_status_page(engine);
-       else
-               ret = init_status_page(engine);
-       if (ret)
-               goto err_breadcrumbs;
+       engine->emit_fini_breadcrumb_dw = ret;
 
        return 0;
 
-err_breadcrumbs:
-       intel_engine_fini_breadcrumbs(engine);
 err_unpin_preempt:
        if (i915->preempt_context)
                __intel_context_unpin(i915->preempt_context, engine);
@@ -769,12 +842,12 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
 
 u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
 {
-       const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
+       const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
        u32 mcr_s_ss_select;
        u32 slice = fls(sseu->slice_mask);
        u32 subslice = fls(sseu->subslice_mask[slice]);
 
-       if (IS_GEN10(dev_priv))
+       if (IS_GEN(dev_priv, 10))
                mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
                                  GEN8_MCR_SUBSLICE(subslice);
        else if (INTEL_GEN(dev_priv) >= 11)
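
This hunk is part of a tree-wide conversion of the gen predicates: IS_GENn(p) becomes IS_GEN(p, n), and the old two-argument range form IS_GEN(p, a, b) becomes IS_GEN_RANGE(p, a, b). Illustrative only; the real macros test a precomputed gen_mask rather than comparing integers:

static bool example_gen_checks(struct drm_i915_private *i915)
{
        if (IS_GEN(i915, 10))           /* exactly gen10; was IS_GEN10() */
                return true;

        return IS_GEN_RANGE(i915, 4, 7); /* gen4..gen7; was IS_GEN(i915, 4, 7) */
}
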
@@ -786,15 +859,15 @@ u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
        return mcr_s_ss_select;
 }
 
-static inline uint32_t
+static inline u32
 read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
                  int subslice, i915_reg_t reg)
 {
-       uint32_t mcr_slice_subslice_mask;
-       uint32_t mcr_slice_subslice_select;
-       uint32_t default_mcr_s_ss_select;
-       uint32_t mcr;
-       uint32_t ret;
+       u32 mcr_slice_subslice_mask;
+       u32 mcr_slice_subslice_select;
+       u32 default_mcr_s_ss_select;
+       u32 mcr;
+       u32 ret;
        enum forcewake_domains fw_domains;
 
        if (INTEL_GEN(dev_priv) >= 11) {
@@ -900,10 +973,15 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
 static bool ring_is_idle(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
+       intel_wakeref_t wakeref;
        bool idle = true;
 
+       if (I915_SELFTEST_ONLY(!engine->mmio_base))
+               return true;
+
        /* If the whole device is asleep, the engine must be idle */
-       if (!intel_runtime_pm_get_if_in_use(dev_priv))
+       wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
+       if (!wakeref)
                return true;
 
        /* First check that no commands are left in the ring */
@@ -915,7 +993,7 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
        if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
                idle = false;
 
-       intel_runtime_pm_put(dev_priv);
+       intel_runtime_pm_put(dev_priv, wakeref);
 
        return idle;
 }
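
Throughout this series, runtime-pm acquire/release pairs are converted to carry an intel_wakeref_t cookie, so an unbalanced reference can be traced back to its acquirer. The converted pattern, sketched from the hunk above:

intel_wakeref_t wakeref;

wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
if (!wakeref)
        return true;    /* device is asleep, so trivially idle here */
/* ... access the hardware ... */
intel_runtime_pm_put(dev_priv, wakeref);        /* pair the put with its get */
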
@@ -939,9 +1017,6 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
        if (!intel_engine_signaled(engine, intel_engine_last_submit(engine)))
                return false;
 
-       if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock))
-               return true;
-
        /* Waiting to drain ELSP? */
        if (READ_ONCE(engine->execlists.active)) {
                struct tasklet_struct *t = &engine->execlists.tasklet;
@@ -967,10 +1042,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
                return false;
 
        /* Ring stopped? */
-       if (!ring_is_idle(engine))
-               return false;
-
-       return true;
+       return ring_is_idle(engine);
 }
 
 bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
@@ -1014,7 +1086,7 @@ bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
         * the last request that remains in the timeline. When idle, it is
         * the last executed context as tracked by retirement.
         */
-       rq = __i915_gem_active_peek(&engine->timeline.last_request);
+       rq = __i915_active_request_peek(&engine->timeline.last_request);
        if (rq)
                return rq->hw_context == kernel_context;
        else
@@ -1030,26 +1102,36 @@ void intel_engines_reset_default_submission(struct drm_i915_private *i915)
                engine->set_default_submission(engine);
 }
 
+static bool reset_engines(struct drm_i915_private *i915)
+{
+       if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
+               return false;
+
+       return intel_gpu_reset(i915, ALL_ENGINES) == 0;
+}
+
 /**
  * intel_engines_sanitize: called after the GPU has lost power
  * @i915: the i915 device
+ * @force: ignore a failed reset and sanitize engine state anyway
  *
  * Anytime we reset the GPU, either with an explicit GPU reset or through a
  * PCI power cycle, the GPU loses state and we must reset our state tracking
  * to match. Note that calling intel_engines_sanitize() if the GPU has not
  * been reset results in much confusion!
  */
-void intel_engines_sanitize(struct drm_i915_private *i915)
+void intel_engines_sanitize(struct drm_i915_private *i915, bool force)
 {
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
 
        GEM_TRACE("\n");
 
-       for_each_engine(engine, i915, id) {
-               if (engine->reset.reset)
-                       engine->reset.reset(engine, NULL);
-       }
+       if (!reset_engines(i915) && !force)
+               return;
+
+       for_each_engine(engine, i915, id)
+               intel_engine_reset(engine, false);
 }
 
 /**
@@ -1085,7 +1167,7 @@ void intel_engines_park(struct drm_i915_private *i915)
                }
 
                /* Must be reset upon idling, or we may miss the busy wakeup. */
-               GEM_BUG_ON(engine->execlists.queue_priority != INT_MIN);
+               GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);
 
                if (engine->park)
                        engine->park(engine);
@@ -1201,10 +1283,14 @@ static void print_request(struct drm_printer *m,
 
        x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));
 
-       drm_printf(m, "%s%x%s [%llx:%llx]%s @ %dms: %s\n",
+       drm_printf(m, "%s%x%s%s [%llx:%llx]%s @ %dms: %s\n",
                   prefix,
                   rq->global_seqno,
-                  i915_request_completed(rq) ? "!" : "",
+                  i915_request_completed(rq) ? "!" :
+                  i915_request_started(rq) ? "*" :
+                  "",
+                  test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+                           &rq->fence.flags) ?  "+" : "",
                   rq->fence.context, rq->fence.seqno,
                   buf,
                   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
@@ -1248,7 +1334,7 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
                &engine->execlists;
        u64 addr;
 
-       if (engine->id == RCS && IS_GEN(dev_priv, 4, 7))
+       if (engine->id == RCS && IS_GEN_RANGE(dev_priv, 4, 7))
                drm_printf(m, "\tCCID: 0x%08x\n", I915_READ(CCID));
        drm_printf(m, "\tRING_START: 0x%08x\n",
                   I915_READ(RING_START(engine->mmio_base)));
@@ -1269,16 +1355,6 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
                drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));
        }
 
-       if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
-               drm_printf(m, "\tSYNC_0: 0x%08x\n",
-                          I915_READ(RING_SYNC_0(engine->mmio_base)));
-               drm_printf(m, "\tSYNC_1: 0x%08x\n",
-                          I915_READ(RING_SYNC_1(engine->mmio_base)));
-               if (HAS_VEBOX(dev_priv))
-                       drm_printf(m, "\tSYNC_2: 0x%08x\n",
-                                  I915_READ(RING_SYNC_2(engine->mmio_base)));
-       }
-
        addr = intel_engine_get_active_head(engine);
        drm_printf(m, "\tACTHD:  0x%08x_%08x\n",
                   upper_32_bits(addr), lower_32_bits(addr));
@@ -1305,7 +1381,8 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
        }
 
        if (HAS_EXECLISTS(dev_priv)) {
-               const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
+               const u32 *hws =
+                       &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
                unsigned int idx;
                u8 read, write;
 
@@ -1348,9 +1425,10 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
                                char hdr[80];
 
                                snprintf(hdr, sizeof(hdr),
-                                        "\t\tELSP[%d] count=%d, ring->start=%08x, rq: ",
+                                        "\t\tELSP[%d] count=%d, ring:{start:%08x, hwsp:%08x}, rq: ",
                                         idx, count,
-                                        i915_ggtt_offset(rq->ring->vma));
+                                        i915_ggtt_offset(rq->ring->vma),
+                                        rq->timeline->hwsp_offset);
                                print_request(m, rq, hdr);
                        } else {
                                drm_printf(m, "\t\tELSP[%d] idle\n", idx);
@@ -1405,14 +1483,9 @@ void intel_engine_dump(struct intel_engine_cs *engine,
                       struct drm_printer *m,
                       const char *header, ...)
 {
-       const int MAX_REQUESTS_TO_SHOW = 8;
-       struct intel_breadcrumbs * const b = &engine->breadcrumbs;
-       const struct intel_engine_execlists * const execlists = &engine->execlists;
        struct i915_gpu_error * const error = &engine->i915->gpu_error;
-       struct i915_request *rq, *last;
-       unsigned long flags;
-       struct rb_node *rb;
-       int count;
+       struct i915_request *rq;
+       intel_wakeref_t wakeref;
 
        if (header) {
                va_list ap;
@@ -1462,85 +1535,30 @@ void intel_engine_dump(struct intel_engine_cs *engine,
                           rq->ring->emit);
                drm_printf(m, "\t\tring->space:  0x%08x\n",
                           rq->ring->space);
+               drm_printf(m, "\t\tring->hwsp:   0x%08x\n",
+                          rq->timeline->hwsp_offset);
 
                print_request_ring(m, rq);
        }
 
        rcu_read_unlock();
 
-       if (intel_runtime_pm_get_if_in_use(engine->i915)) {
+       wakeref = intel_runtime_pm_get_if_in_use(engine->i915);
+       if (wakeref) {
                intel_engine_print_registers(engine, m);
-               intel_runtime_pm_put(engine->i915);
+               intel_runtime_pm_put(engine->i915, wakeref);
        } else {
                drm_printf(m, "\tDevice is asleep; skipping register dump\n");
        }
 
-       local_irq_save(flags);
-       spin_lock(&engine->timeline.lock);
-
-       last = NULL;
-       count = 0;
-       list_for_each_entry(rq, &engine->timeline.requests, link) {
-               if (count++ < MAX_REQUESTS_TO_SHOW - 1)
-                       print_request(m, rq, "\t\tE ");
-               else
-                       last = rq;
-       }
-       if (last) {
-               if (count > MAX_REQUESTS_TO_SHOW) {
-                       drm_printf(m,
-                                  "\t\t...skipping %d executing requests...\n",
-                                  count - MAX_REQUESTS_TO_SHOW);
-               }
-               print_request(m, last, "\t\tE ");
-       }
-
-       last = NULL;
-       count = 0;
-       drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
-       for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
-               struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
-               int i;
-
-               priolist_for_each_request(rq, p, i) {
-                       if (count++ < MAX_REQUESTS_TO_SHOW - 1)
-                               print_request(m, rq, "\t\tQ ");
-                       else
-                               last = rq;
-               }
-       }
-       if (last) {
-               if (count > MAX_REQUESTS_TO_SHOW) {
-                       drm_printf(m,
-                                  "\t\t...skipping %d queued requests...\n",
-                                  count - MAX_REQUESTS_TO_SHOW);
-               }
-               print_request(m, last, "\t\tQ ");
-       }
-
-       spin_unlock(&engine->timeline.lock);
-
-       spin_lock(&b->rb_lock);
-       for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
-               struct intel_wait *w = rb_entry(rb, typeof(*w), node);
-
-               drm_printf(m, "\t%s [%d:%c] waiting for %x\n",
-                          w->tsk->comm, w->tsk->pid,
-                          task_state_to_char(w->tsk),
-                          w->seqno);
-       }
-       spin_unlock(&b->rb_lock);
-       local_irq_restore(flags);
-
-       drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s)\n",
-                  engine->irq_posted,
-                  yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
-                                 &engine->irq_posted)));
+       intel_execlists_show_requests(engine, m, print_request, 8);
 
        drm_printf(m, "HWSP:\n");
-       hexdump(m, engine->status_page.page_addr, PAGE_SIZE);
+       hexdump(m, engine->status_page.addr, PAGE_SIZE);
 
        drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));
+
+       intel_engine_print_breadcrumbs(engine, m);
 }
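
The hand-rolled walks of the engine timeline, the execlists queue and the breadcrumb waiters are replaced by the intel_execlists_show_requests() helper, capped at 8 requests per list. Its signature, as implied by the call above (an assumption, not quoted from intel_lrc.h):

void intel_execlists_show_requests(struct intel_engine_cs *engine,
                                   struct drm_printer *m,
                                   void (*show_request)(struct drm_printer *m,
                                                        struct i915_request *rq,
                                                        const char *prefix),
                                   unsigned int max);
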
 
 static u8 user_class_map[] = {
index f23570c44323b1ad324654fce53d3802de464ffc..656e684e7c9a3a01009a044f05da6fffbe1d1a4c 100644 (file)
@@ -38,6 +38,8 @@
  * forcibly disable it to allow proper screen updates.
  */
 
+#include <drm/drm_fourcc.h>
+
 #include "intel_drv.h"
 #include "i915_drv.h"
 
@@ -84,7 +86,7 @@ static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
        int lines;
 
        intel_fbc_get_plane_source_size(cache, NULL, &lines);
-       if (IS_GEN7(dev_priv))
+       if (IS_GEN(dev_priv, 7))
                lines = min(lines, 2048);
        else if (INTEL_GEN(dev_priv) >= 8)
                lines = min(lines, 2560);
@@ -127,7 +129,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
                cfb_pitch = params->fb.stride;
 
        /* FBC_CTL wants 32B or 64B units */
-       if (IS_GEN2(dev_priv))
+       if (IS_GEN(dev_priv, 2))
                cfb_pitch = (cfb_pitch / 32) - 1;
        else
                cfb_pitch = (cfb_pitch / 64) - 1;
@@ -136,7 +138,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
        for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
                I915_WRITE(FBC_TAG(i), 0);
 
-       if (IS_GEN4(dev_priv)) {
+       if (IS_GEN(dev_priv, 4)) {
                u32 fbc_ctl2;
 
                /* Set it up... */
@@ -233,9 +235,9 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
 
        if (params->flags & PLANE_HAS_FENCE) {
                dpfc_ctl |= DPFC_CTL_FENCE_EN;
-               if (IS_GEN5(dev_priv))
+               if (IS_GEN(dev_priv, 5))
                        dpfc_ctl |= params->vma->fence->id;
-               if (IS_GEN6(dev_priv)) {
+               if (IS_GEN(dev_priv, 6)) {
                        I915_WRITE(SNB_DPFC_CTL_SA,
                                   SNB_CPU_FENCE_ENABLE |
                                   params->vma->fence->id);
@@ -243,7 +245,7 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
                                   params->crtc.fence_y_offset);
                }
        } else {
-               if (IS_GEN6(dev_priv)) {
+               if (IS_GEN(dev_priv, 6)) {
                        I915_WRITE(SNB_DPFC_CTL_SA, 0);
                        I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
                }
@@ -282,7 +284,7 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
        int threshold = dev_priv->fbc.threshold;
 
        /* Display WA #0529: skl, kbl, bxt. */
-       if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv)) {
+       if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv)) {
                u32 val = I915_READ(CHICKEN_MISC_4);
 
                val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);
@@ -581,10 +583,10 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv,
        if (stride < 512)
                return false;
 
-       if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
+       if (IS_GEN(dev_priv, 2) || IS_GEN(dev_priv, 3))
                return stride == 4096 || stride == 8192;
 
-       if (IS_GEN4(dev_priv) && !IS_G4X(dev_priv) && stride < 2048)
+       if (IS_GEN(dev_priv, 4) && !IS_G4X(dev_priv) && stride < 2048)
                return false;
 
        if (stride > 16384)
@@ -594,7 +596,7 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv,
 }
 
 static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
-                                 uint32_t pixel_format)
+                                 u32 pixel_format)
 {
        switch (pixel_format) {
        case DRM_FORMAT_XRGB8888:
@@ -603,7 +605,7 @@ static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
        case DRM_FORMAT_XRGB1555:
        case DRM_FORMAT_RGB565:
                /* 16bpp not supported on gen2 */
-               if (IS_GEN2(dev_priv))
+               if (IS_GEN(dev_priv, 2))
                        return false;
                /* WaFbcOnly1to1Ratio:ctg */
                if (IS_G4X(dev_priv))
@@ -626,7 +628,10 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
        struct intel_fbc *fbc = &dev_priv->fbc;
        unsigned int effective_w, effective_h, max_w, max_h;
 
-       if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
+       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+               max_w = 5120;
+               max_h = 4096;
+       } else if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
                max_w = 4096;
                max_h = 4096;
        } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
@@ -784,7 +789,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
         * having a Y offset that isn't divisible by 4 causes FIFO underrun
         * and screen flicker.
         */
-       if (IS_GEN(dev_priv, 9, 10) &&
+       if (IS_GEN_RANGE(dev_priv, 9, 10) &&
            (fbc->state_cache.plane.adjusted_y & 3)) {
                fbc->no_fbc_reason = "plane Y offset is misaligned";
                return false;
@@ -839,7 +844,7 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
 
        params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
 
-       if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
+       if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
                params->gen9_wa_cfb_stride = DIV_ROUND_UP(cache->plane.src_w,
                                                32 * fbc->threshold) * 8;
 }
@@ -1126,8 +1131,6 @@ void intel_fbc_disable(struct intel_crtc *crtc)
        if (!fbc_supported(dev_priv))
                return;
 
-       WARN_ON(crtc->active);
-
        mutex_lock(&fbc->lock);
        if (fbc->crtc == crtc)
                __intel_fbc_disable(dev_priv);
index fb5bb5b32a6034d152516ae11c15913e2f97597d..376ffe842e2678d1f31ee68acd38908cff8a85d9 100644 (file)
 #include <linux/init.h>
 #include <linux/vga_switcheroo.h>
 
-#include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
+
 #include "intel_drv.h"
 #include "intel_frontbuffer.h"
 #include <drm/i915_drm.h>
@@ -178,8 +179,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
        const struct i915_ggtt_view view = {
                .type = I915_GGTT_VIEW_NORMAL,
        };
-       struct fb_info *info;
        struct drm_framebuffer *fb;
+       intel_wakeref_t wakeref;
+       struct fb_info *info;
        struct i915_vma *vma;
        unsigned long flags = 0;
        bool prealloc = false;
@@ -210,7 +212,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
        }
 
        mutex_lock(&dev->struct_mutex);
-       intel_runtime_pm_get(dev_priv);
+       wakeref = intel_runtime_pm_get(dev_priv);
 
        /* Pin the GGTT vma for our access via info->screen_base.
         * This also validates that any existing fb inherited from the
@@ -277,7 +279,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
        ifbdev->vma = vma;
        ifbdev->vma_flags = flags;
 
-       intel_runtime_pm_put(dev_priv);
+       intel_runtime_pm_put(dev_priv, wakeref);
        mutex_unlock(&dev->struct_mutex);
        vga_switcheroo_client_fb_set(pdev, info);
        return 0;
@@ -285,7 +287,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 out_unpin:
        intel_unpin_fb_vma(vma, flags);
 out_unlock:
-       intel_runtime_pm_put(dev_priv);
+       intel_runtime_pm_put(dev_priv, wakeref);
        mutex_unlock(&dev->struct_mutex);
        return ret;
 }
@@ -679,6 +681,7 @@ int intel_fbdev_init(struct drm_device *dev)
        if (ifbdev == NULL)
                return -ENOMEM;
 
+       mutex_init(&ifbdev->hpd_lock);
        drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs);
 
        if (!intel_fbdev_init_bios(dev, ifbdev))
@@ -752,6 +755,26 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv)
        intel_fbdev_destroy(ifbdev);
 }
 
+/* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD
+ * processing, fbdev will perform a full connector reprobe if a hotplug event
+ * was received while HPD was suspended.
+ */
+static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state)
+{
+       bool send_hpd = false;
+
+       mutex_lock(&ifbdev->hpd_lock);
+       ifbdev->hpd_suspended = state == FBINFO_STATE_SUSPENDED;
+       send_hpd = !ifbdev->hpd_suspended && ifbdev->hpd_waiting;
+       ifbdev->hpd_waiting = false;
+       mutex_unlock(&ifbdev->hpd_lock);
+
+       if (send_hpd) {
+               DRM_DEBUG_KMS("Handling delayed fbcon HPD event\n");
+               drm_fb_helper_hotplug_event(&ifbdev->helper);
+       }
+}
+
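
This handshake relies on three fields assumed to be added to struct intel_fbdev elsewhere in the patch; the names are inferred from their use here, and the wrapper struct below is purely illustrative:

struct fbdev_hpd_state {
        struct mutex hpd_lock;  /* guards the two flags below */
        bool hpd_suspended;     /* fbdev HPD processing is paused */
        bool hpd_waiting;       /* a hotplug arrived while paused */
};
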
 void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -773,6 +796,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
                 */
                if (state != FBINFO_STATE_RUNNING)
                        flush_work(&dev_priv->fbdev_suspend_work);
+
                console_lock();
        } else {
                /*
@@ -800,17 +824,26 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
 
        drm_fb_helper_set_suspend(&ifbdev->helper, state);
        console_unlock();
+
+       intel_fbdev_hpd_set_suspend(ifbdev, state);
 }
 
 void intel_fbdev_output_poll_changed(struct drm_device *dev)
 {
        struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
+       bool send_hpd;
 
        if (!ifbdev)
                return;
 
        intel_fbdev_sync(ifbdev);
-       if (ifbdev->vma || ifbdev->helper.deferred_setup)
+
+       mutex_lock(&ifbdev->hpd_lock);
+       send_hpd = !ifbdev->hpd_suspended;
+       ifbdev->hpd_waiting = true;
+       mutex_unlock(&ifbdev->hpd_lock);
+
+       if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup))
                drm_fb_helper_hotplug_event(&ifbdev->helper);
 }
 
index 77c123cc88179e7f0ac3ef40af6d75fd34f59583..f33de4be4b89a7b07cc6a7b750e43bef9c439c07 100644 (file)
@@ -127,8 +127,8 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
                                                 enum pipe pipe, bool enable)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
-       uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
-                                         DE_PIPEB_FIFO_UNDERRUN;
+       u32 bit = (pipe == PIPE_A) ?
+               DE_PIPEA_FIFO_UNDERRUN : DE_PIPEB_FIFO_UNDERRUN;
 
        if (enable)
                ilk_enable_display_irq(dev_priv, bit);
@@ -140,7 +140,7 @@ static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc)
 {
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
-       uint32_t err_int = I915_READ(GEN7_ERR_INT);
+       u32 err_int = I915_READ(GEN7_ERR_INT);
 
        lockdep_assert_held(&dev_priv->irq_lock);
 
@@ -193,8 +193,8 @@ static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
                                            bool enable)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
-       uint32_t bit = (pch_transcoder == PIPE_A) ?
-                      SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
+       u32 bit = (pch_transcoder == PIPE_A) ?
+               SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
 
        if (enable)
                ibx_enable_display_interrupt(dev_priv, bit);
@@ -206,7 +206,7 @@ static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
 {
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pch_transcoder = crtc->pipe;
-       uint32_t serr_int = I915_READ(SERR_INT);
+       u32 serr_int = I915_READ(SERR_INT);
 
        lockdep_assert_held(&dev_priv->irq_lock);
 
@@ -258,11 +258,11 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
        old = !crtc->cpu_fifo_underrun_disabled;
        crtc->cpu_fifo_underrun_disabled = !enable;
 
-       if (HAS_GMCH_DISPLAY(dev_priv))
+       if (HAS_GMCH(dev_priv))
                i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
-       else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
+       else if (IS_GEN_RANGE(dev_priv, 5, 6))
                ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
-       else if (IS_GEN7(dev_priv))
+       else if (IS_GEN(dev_priv, 7))
                ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
        else if (INTEL_GEN(dev_priv) >= 8)
                broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
@@ -369,7 +369,7 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
                return;
 
        /* GMCH can't disable fifo underruns, filter them. */
-       if (HAS_GMCH_DISPLAY(dev_priv) &&
+       if (HAS_GMCH(dev_priv) &&
            crtc->cpu_fifo_underrun_disabled)
                return;
 
@@ -421,9 +421,9 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)
                if (crtc->cpu_fifo_underrun_disabled)
                        continue;
 
-               if (HAS_GMCH_DISPLAY(dev_priv))
+               if (HAS_GMCH(dev_priv))
                        i9xx_check_fifo_underruns(crtc);
-               else if (IS_GEN7(dev_priv))
+               else if (IS_GEN(dev_priv, 7))
                        ivybridge_check_fifo_underruns(crtc);
        }
 
index c3379bde266f12b1d1756733943ddc14b9d628e1..16f253deaf8d559647d2b5e2449761f8369a8a75 100644 (file)
@@ -60,7 +60,6 @@
  * functions is deprecated and should be avoided.
  */
 
-#include <drm/drmP.h>
 
 #include "intel_drv.h"
 #include "intel_frontbuffer.h"
index 105e2a9e874a114b01369a9faf9871433f1d8ceb..b96a31bc10809c44ecb17414174991758603dc7b 100644 (file)
 #define   MI_MEM_VIRTUAL       (1 << 22) /* 945,g33,965 */
 #define   MI_USE_GGTT          (1 << 22) /* g4x+ */
 #define MI_STORE_DWORD_INDEX   MI_INSTR(0x21, 1)
-#define   MI_STORE_DWORD_INDEX_SHIFT 2
 /*
  * Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM:
  * - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw
index 0f1c4f9ebfd886581ac11beefb477ee047e0ab55..744220296653202fcc40c76c809c86fbf2d55c77 100644 (file)
@@ -192,4 +192,7 @@ static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask)
        spin_unlock_irq(&guc->irq_lock);
 }
 
+int intel_guc_reset_engine(struct intel_guc *guc,
+                          struct intel_engine_cs *engine);
+
 #endif
index a67144ee5ceb6c93a30f8a48f9d5314166e7cb94..13ff7003c6bef3f56d1fca2bdb3eb27ae63721c6 100644 (file)
@@ -77,10 +77,6 @@ static void guc_fw_select(struct intel_uc_fw *guc_fw)
                guc_fw->path = I915_KBL_GUC_UCODE;
                guc_fw->major_ver_wanted = KBL_FW_MAJOR;
                guc_fw->minor_ver_wanted = KBL_FW_MINOR;
-       } else {
-               dev_info(dev_priv->drm.dev,
-                        "%s: No firmware known for this platform!\n",
-                        intel_uc_fw_type_repr(guc_fw->type));
        }
 }
 
@@ -115,7 +111,7 @@ static void guc_prepare_xfer(struct intel_guc *guc)
        else
                I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
 
-       if (IS_GEN9(dev_priv)) {
+       if (IS_GEN(dev_priv, 9)) {
                /* DOP Clock Gating Enable for GuC clocks */
                I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
                                            I915_READ(GEN7_MISCCPCTL)));
index d3ebdbc0182e745b0d94611b862a6f03afbb8550..806fdfd7c78a7acf9777aa88cb72d1beb9c09fd4 100644 (file)
@@ -140,6 +140,9 @@ static struct dentry *create_buf_file_callback(const char *filename,
 
        buf_file = debugfs_create_file(filename, mode,
                                       parent, buf, &relay_file_operations);
+       if (IS_ERR(buf_file))
+               return NULL;
+
        return buf_file;
 }
 
@@ -436,6 +439,7 @@ static void guc_log_capture_logs(struct intel_guc_log *log)
 {
        struct intel_guc *guc = log_to_guc(log);
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
+       intel_wakeref_t wakeref;
 
        guc_read_update_log_buffer(log);
 
@@ -443,9 +447,8 @@ static void guc_log_capture_logs(struct intel_guc_log *log)
         * Generally device is expected to be active only at this
         * time, so get/put should be really quick.
         */
-       intel_runtime_pm_get(dev_priv);
-       guc_action_flush_log_complete(guc);
-       intel_runtime_pm_put(dev_priv);
+       with_intel_runtime_pm(dev_priv, wakeref)
+               guc_action_flush_log_complete(guc);
 }
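
Several such get/put pairs become with_intel_runtime_pm(), a for-loop macro that holds the wakeref for the statement body and releases it on exit. Roughly (an assumption; see the series' intel_drv.h for the exact definition):

#define with_intel_runtime_pm(i915, wf) \
        for ((wf) = intel_runtime_pm_get(i915); (wf); \
             intel_runtime_pm_put((i915), (wf)), (wf) = 0)
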
 
 int intel_guc_log_create(struct intel_guc_log *log)
@@ -505,7 +508,8 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
 {
        struct intel_guc *guc = log_to_guc(log);
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
-       int ret;
+       intel_wakeref_t wakeref;
+       int ret = 0;
 
        BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0);
        GEM_BUG_ON(!log->vma);
@@ -519,16 +523,14 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
 
        mutex_lock(&dev_priv->drm.struct_mutex);
 
-       if (log->level == level) {
-               ret = 0;
+       if (log->level == level)
                goto out_unlock;
-       }
 
-       intel_runtime_pm_get(dev_priv);
-       ret = guc_action_control_log(guc, GUC_LOG_LEVEL_IS_VERBOSE(level),
-                                    GUC_LOG_LEVEL_IS_ENABLED(level),
-                                    GUC_LOG_LEVEL_TO_VERBOSITY(level));
-       intel_runtime_pm_put(dev_priv);
+       with_intel_runtime_pm(dev_priv, wakeref)
+               ret = guc_action_control_log(guc,
+                                            GUC_LOG_LEVEL_IS_VERBOSE(level),
+                                            GUC_LOG_LEVEL_IS_ENABLED(level),
+                                            GUC_LOG_LEVEL_TO_VERBOSITY(level));
        if (ret) {
                DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret);
                goto out_unlock;
@@ -601,6 +603,7 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log)
 {
        struct intel_guc *guc = log_to_guc(log);
        struct drm_i915_private *i915 = guc_to_i915(guc);
+       intel_wakeref_t wakeref;
 
        /*
         * Before initiating the forceful flush, wait for any pending/ongoing
@@ -608,9 +611,8 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log)
         */
        flush_work(&log->relay.flush_work);
 
-       intel_runtime_pm_get(i915);
-       guc_action_flush_log(guc);
-       intel_runtime_pm_put(i915);
+       with_intel_runtime_pm(i915, wakeref)
+               guc_action_flush_log(guc);
 
        /* GuC would have updated log buffer by now, so capture it */
        guc_log_capture_logs(log);
index 1570dcbe249c0c8c6b9c8755bf0a89e2a8b2368b..8bc8aa54aa358b335e1a1e98d74f9d3d59323259 100644 (file)
  *
  */
 
+static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
+{
+       return (i915_ggtt_offset(engine->status_page.vma) +
+               I915_GEM_HWS_PREEMPT_ADDR);
+}
+
 static inline struct i915_priolist *to_priolist(struct rb_node *rb)
 {
        return rb_entry(rb, struct i915_priolist, node);
@@ -572,7 +578,8 @@ static void inject_preempt_context(struct work_struct *work)
                if (engine->id == RCS) {
                        cs = gen8_emit_ggtt_write_rcs(cs,
                                                      GUC_PREEMPT_FINISHED,
-                                                     addr);
+                                                     addr,
+                                                     PIPE_CONTROL_CS_STALL);
                } else {
                        cs = gen8_emit_ggtt_write(cs,
                                                  GUC_PREEMPT_FINISHED,
@@ -622,6 +629,8 @@ static void inject_preempt_context(struct work_struct *work)
                                       EXECLISTS_ACTIVE_PREEMPT);
                tasklet_schedule(&engine->execlists.tasklet);
        }
+
+       (void)I915_SELFTEST_ONLY(engine->execlists.preempt_hang.count++);
 }
 
 /*
@@ -665,7 +674,7 @@ static void complete_preempt_context(struct intel_engine_cs *engine)
        execlists_unwind_incomplete_requests(execlists);
 
        wait_for_guc_preempt_report(engine);
-       intel_write_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX, 0);
+       intel_write_status_page(engine, I915_GEM_HWS_PREEMPT, 0);
 }
 
 /**
@@ -730,7 +739,7 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
                if (intel_engine_has_preemption(engine)) {
                        struct guc_preempt_work *preempt_work =
                                &engine->i915->guc.preempt_work[engine->id];
-                       int prio = execlists->queue_priority;
+                       int prio = execlists->queue_priority_hint;
 
                        if (__execlists_need_preempt(prio, port_prio(port))) {
                                execlists_set_active(execlists,
@@ -776,7 +785,8 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
                        kmem_cache_free(engine->i915->priorities, p);
        }
 done:
-       execlists->queue_priority = rb ? to_priolist(rb)->priority : INT_MIN;
+       execlists->queue_priority_hint =
+               rb ? to_priolist(rb)->priority : INT_MIN;
        if (submit)
                port_assign(port, last);
        if (last)
@@ -823,7 +833,7 @@ static void guc_submission_tasklet(unsigned long data)
        }
 
        if (execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT) &&
-           intel_read_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX) ==
+           intel_read_status_page(engine, I915_GEM_HWS_PREEMPT) ==
            GUC_PREEMPT_FINISHED)
                complete_preempt_context(engine);
 
@@ -833,8 +843,7 @@ static void guc_submission_tasklet(unsigned long data)
        spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
 
-static struct i915_request *
-guc_reset_prepare(struct intel_engine_cs *engine)
+static void guc_reset_prepare(struct intel_engine_cs *engine)
 {
        struct intel_engine_execlists * const execlists = &engine->execlists;
 
@@ -860,8 +869,6 @@ guc_reset_prepare(struct intel_engine_cs *engine)
         */
        if (engine->i915->guc.preempt_wq)
                flush_workqueue(engine->i915->guc.preempt_wq);
-
-       return i915_gem_find_active_request(engine);
 }
 
 /*
index c22b3e18a0f5ff54530b0676aad6303d6c298889..1d7d26e4cf14f7f2e7d3c4988a354d9d5c82f62b 100644 (file)
@@ -49,6 +49,9 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
                return true;
        if (IS_BROXTON(dev_priv))
                return true;
+       if (IS_COFFEELAKE(dev_priv))
+               return true;
+
        return false;
 }
 
@@ -105,15 +108,6 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
                return -EIO;
        }
 
-       /*
-        * We're not in host or fail to find a MPT module, disable GVT-g
-        */
-       ret = intel_gvt_init_host();
-       if (ret) {
-               DRM_DEBUG_DRIVER("Not in host or MPT modules not found\n");
-               goto bail;
-       }
-
        ret = intel_gvt_init_device(dev_priv);
        if (ret) {
                DRM_DEBUG_DRIVER("Fail to init GVT device\n");
index e26d05a46451fe7f604474aa044e8f1307022177..a219c796e56d9c1f718e5e4d434ede8225a4823c 100644 (file)
  */
 
 #include "i915_drv.h"
+#include "i915_reset.h"
 
-static bool
-ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
-{
-       ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
-       return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
-                        MI_SEMAPHORE_REGISTER);
-}
-
-static struct intel_engine_cs *
-semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
-                                u64 offset)
-{
-       struct drm_i915_private *dev_priv = engine->i915;
-       u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
-       struct intel_engine_cs *signaller;
-       enum intel_engine_id id;
-
-       for_each_engine(signaller, dev_priv, id) {
-               if (engine == signaller)
-                       continue;
-
-               if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
-                       return signaller;
-       }
-
-       DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x\n",
-                        engine->name, ipehr);
-
-       return ERR_PTR(-ENODEV);
-}
-
-static struct intel_engine_cs *
-semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
-{
-       struct drm_i915_private *dev_priv = engine->i915;
-       void __iomem *vaddr;
-       u32 cmd, ipehr, head;
-       u64 offset = 0;
-       int i, backwards;
-
-       /*
-        * This function does not support execlist mode - any attempt to
-        * proceed further into this function will result in a kernel panic
-        * when dereferencing ring->buffer, which is not set up in execlist
-        * mode.
-        *
-        * The correct way of doing it would be to derive the currently
-        * executing ring buffer from the current context, which is derived
-        * from the currently running request. Unfortunately, to get the
-        * current request we would have to grab the struct_mutex before doing
-        * anything else, which would be ill-advised since some other thread
-        * might have grabbed it already and managed to hang itself, causing
-        * the hang checker to deadlock.
-        *
-        * Therefore, this function does not support execlist mode in its
-        * current form. Just return NULL and move on.
-        */
-       if (engine->buffer == NULL)
-               return NULL;
-
-       ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
-       if (!ipehr_is_semaphore_wait(engine, ipehr))
-               return NULL;
-
-       /*
-        * HEAD is likely pointing to the dword after the actual command,
-        * so scan backwards until we find the MBOX. But limit it to just 3
-        * or 4 dwords depending on the semaphore wait command size.
-        * Note that we don't care about ACTHD here since that might
-        * point at at batch, and semaphores are always emitted into the
-        * ringbuffer itself.
-        */
-       head = I915_READ_HEAD(engine) & HEAD_ADDR;
-       backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
-       vaddr = (void __iomem *)engine->buffer->vaddr;
-
-       for (i = backwards; i; --i) {
-               /*
-                * Be paranoid and presume the hw has gone off into the wild -
-                * our ring is smaller than what the hardware (and hence
-                * HEAD_ADDR) allows. Also handles wrap-around.
-                */
-               head &= engine->buffer->size - 1;
-
-               /* This here seems to blow up */
-               cmd = ioread32(vaddr + head);
-               if (cmd == ipehr)
-                       break;
-
-               head -= 4;
-       }
-
-       if (!i)
-               return NULL;
-
-       *seqno = ioread32(vaddr + head + 4) + 1;
-       return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
-}
-
-static int semaphore_passed(struct intel_engine_cs *engine)
-{
-       struct drm_i915_private *dev_priv = engine->i915;
-       struct intel_engine_cs *signaller;
+struct hangcheck {
+       u64 acthd;
        u32 seqno;
-
-       engine->hangcheck.deadlock++;
-
-       signaller = semaphore_waits_for(engine, &seqno);
-       if (signaller == NULL)
-               return -1;
-
-       if (IS_ERR(signaller))
-               return 0;
-
-       /* Prevent pathological recursion due to driver bugs */
-       if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
-               return -1;
-
-       if (intel_engine_signaled(signaller, seqno))
-               return 1;
-
-       /* cursory check for an unkickable deadlock */
-       if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
-           semaphore_passed(signaller) < 0)
-               return -1;
-
-       return 0;
-}
-
-static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
-{
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
-
-       for_each_engine(engine, dev_priv, id)
-               engine->hangcheck.deadlock = 0;
-}
+       enum intel_engine_hangcheck_action action;
+       unsigned long action_timestamp;
+       int deadlock;
+       struct intel_instdone instdone;
+       bool wedged:1;
+       bool stalled:1;
+};
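
Hangcheck working state now lives in this on-stack struct, sampled once per pass instead of being read back from engine->hangcheck mid-check; only acthd and seqno are persisted between passes (see hangcheck_store_sample below). The per-engine flow, as used in the loop further down:

struct hangcheck hc;

hangcheck_load_sample(engine, &hc);             /* snapshot ACTHD + seqno */
hangcheck_accumulate_sample(engine, &hc);       /* classify progress/stall */
hangcheck_store_sample(engine, &hc);            /* persist acthd/seqno */
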
 
 static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone)
 {
@@ -236,7 +110,7 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
        if (ha != ENGINE_DEAD)
                return ha;
 
-       if (IS_GEN2(dev_priv))
+       if (IS_GEN(dev_priv, 2))
                return ENGINE_DEAD;
 
        /* Is the chip hanging on a WAIT_FOR_EVENT?
@@ -252,54 +126,26 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
                return ENGINE_WAIT_KICK;
        }
 
-       if (IS_GEN(dev_priv, 6, 7) && tmp & RING_WAIT_SEMAPHORE) {
-               switch (semaphore_passed(engine)) {
-               default:
-                       return ENGINE_DEAD;
-               case 1:
-                       i915_handle_error(dev_priv, ALL_ENGINES, 0,
-                                         "stuck semaphore on %s",
-                                         engine->name);
-                       I915_WRITE_CTL(engine, tmp);
-                       return ENGINE_WAIT_KICK;
-               case 0:
-                       return ENGINE_WAIT;
-               }
-       }
-
        return ENGINE_DEAD;
 }
 
 static void hangcheck_load_sample(struct intel_engine_cs *engine,
-                                 struct intel_engine_hangcheck *hc)
+                                 struct hangcheck *hc)
 {
-       /* We don't strictly need an irq-barrier here, as we are not
-        * serving an interrupt request, be paranoid in case the
-        * barrier has side-effects (such as preventing a broken
-        * cacheline snoop) and so be sure that we can see the seqno
-        * advance. If the seqno should stick, due to a stale
-        * cacheline, we would erroneously declare the GPU hung.
-        */
-       if (engine->irq_seqno_barrier)
-               engine->irq_seqno_barrier(engine);
-
        hc->acthd = intel_engine_get_active_head(engine);
        hc->seqno = intel_engine_get_seqno(engine);
 }
 
 static void hangcheck_store_sample(struct intel_engine_cs *engine,
-                                  const struct intel_engine_hangcheck *hc)
+                                  const struct hangcheck *hc)
 {
        engine->hangcheck.acthd = hc->acthd;
        engine->hangcheck.seqno = hc->seqno;
-       engine->hangcheck.action = hc->action;
-       engine->hangcheck.stalled = hc->stalled;
-       engine->hangcheck.wedged = hc->wedged;
 }
 
 static enum intel_engine_hangcheck_action
 hangcheck_get_action(struct intel_engine_cs *engine,
-                    const struct intel_engine_hangcheck *hc)
+                    const struct hangcheck *hc)
 {
        if (engine->hangcheck.seqno != hc->seqno)
                return ENGINE_ACTIVE_SEQNO;
@@ -311,7 +157,7 @@ hangcheck_get_action(struct intel_engine_cs *engine,
 }
 
 static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
-                                       struct intel_engine_hangcheck *hc)
+                                       struct hangcheck *hc)
 {
        unsigned long timeout = I915_ENGINE_DEAD_TIMEOUT;
 
@@ -357,10 +203,6 @@ static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
                break;
 
        case ENGINE_DEAD:
-               if (GEM_SHOW_DEBUG()) {
-                       struct drm_printer p = drm_debug_printer("hangcheck");
-                       intel_engine_dump(engine, &p, "%s\n", engine->name);
-               }
                break;
 
        default:
@@ -431,24 +273,35 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
        intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
 
        for_each_engine(engine, dev_priv, id) {
-               struct intel_engine_hangcheck hc;
+               struct hangcheck hc;
 
-               semaphore_clear_deadlocks(dev_priv);
+               intel_engine_signal_breadcrumbs(engine);
 
                hangcheck_load_sample(engine, &hc);
                hangcheck_accumulate_sample(engine, &hc);
                hangcheck_store_sample(engine, &hc);
 
-               if (engine->hangcheck.stalled) {
+               if (hc.stalled) {
                        hung |= intel_engine_flag(engine);
                        if (hc.action != ENGINE_DEAD)
                                stuck |= intel_engine_flag(engine);
                }
 
-               if (engine->hangcheck.wedged)
+               if (hc.wedged)
                        wedged |= intel_engine_flag(engine);
        }
 
+       if (GEM_SHOW_DEBUG() && (hung | stuck)) {
+               struct drm_printer p = drm_debug_printer("hangcheck");
+
+               for_each_engine(engine, dev_priv, id) {
+                       if (intel_engine_is_idle(engine))
+                               continue;
+
+                       intel_engine_dump(engine, &p, "%s\n", engine->name);
+               }
+       }
+
        if (wedged) {
                dev_err(dev_priv->drm.dev,
                        "GPU recovery timed out,"
index 1bf487f9425404cb85b3b5f5ef3bb43be32a0405..ce7ba3a9c0002c7fd65b5378e7d82be4fc7afd92 100644 (file)
@@ -6,7 +6,6 @@
  * Sean Paul <seanpaul@chromium.org>
  */
 
-#include <drm/drmP.h>
 #include <drm/drm_hdcp.h>
 #include <linux/i2c.h>
 #include <linux/random.h>
@@ -15,6 +14,7 @@
 #include "i915_reg.h"
 
 #define KEY_LOAD_TRIES 5
+#define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS       50
 
 static
 bool intel_hdcp_is_ksv_valid(u8 *ksv)
@@ -157,10 +157,11 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
        /*
         * Initiate loading the HDCP key from fuses.
         *
-        * BXT+ platforms, HDCP key needs to be loaded by SW. Only SKL and KBL
-        * differ in the key load trigger process from other platforms.
+        * On BXT+ platforms, the HDCP key needs to be loaded by SW. Only
+        * Gen9 platforms other than BXT and GLK differ in the key load
+        * trigger process from other platforms, so GEN9_BC uses the GT
+        * Driver Mailbox i/f.
         */
-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+       if (IS_GEN9_BC(dev_priv)) {
                mutex_lock(&dev_priv->pcu_lock);
                ret = sandybridge_pcode_write(dev_priv,
                                              SKL_PCODE_LOAD_HDCP_KEYS, 1);
@@ -636,7 +637,8 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
 
        /* Wait for encryption confirmation */
        if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port),
-                                   HDCP_STATUS_ENC, HDCP_STATUS_ENC, 20)) {
+                                   HDCP_STATUS_ENC, HDCP_STATUS_ENC,
+                                   ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
                DRM_ERROR("Timed out waiting for encryption\n");
                return -ETIMEDOUT;
        }
@@ -666,7 +668,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
 
        I915_WRITE(PORT_HDCP_CONF(port), 0);
        if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port), ~0, 0,
-                                   20)) {
+                                   ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
                DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
                return -ETIMEDOUT;
        }
@@ -768,8 +770,7 @@ static void intel_hdcp_prop_work(struct work_struct *work)
 bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
 {
        /* PORT E doesn't have HDCP, and PORT F is disabled */
-       return ((INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) &&
-               !IS_CHERRYVIEW(dev_priv) && port < PORT_E);
+       return INTEL_GEN(dev_priv) >= 9 && port < PORT_E;
 }
 
 int intel_hdcp_init(struct intel_connector *connector,
@@ -837,8 +838,8 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
                             struct drm_connector_state *old_state,
                             struct drm_connector_state *new_state)
 {
-       uint64_t old_cp = old_state->content_protection;
-       uint64_t new_cp = new_state->content_protection;
+       u64 old_cp = old_state->content_protection;
+       u64 new_cp = new_state->content_protection;
        struct drm_crtc_state *crtc_state;
 
        if (!new_state->crtc) {
index 07e803a604bddada573810e15eef3c01975a9dd8..f125a62eba8cfe687f613d1aca2c2d03955f4b87 100644 (file)
@@ -30,7 +30,6 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/hdmi.h>
-#include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
@@ -479,18 +478,14 @@ static void intel_hdmi_set_avi_infoframe(struct intel_encoder *encoder,
                                         const struct intel_crtc_state *crtc_state,
                                         const struct drm_connector_state *conn_state)
 {
-       struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->base.adjusted_mode;
-       struct drm_connector *connector = &intel_hdmi->attached_connector->base;
-       bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported ||
-          connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB420;
        union hdmi_infoframe frame;
        int ret;
 
        ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
-                                                      adjusted_mode,
-                                                      is_hdmi2_sink);
+                                                      conn_state->connector,
+                                                      adjusted_mode);
        if (ret < 0) {
                DRM_ERROR("couldn't fill AVI infoframe\n");
                return;
@@ -503,12 +498,12 @@ static void intel_hdmi_set_avi_infoframe(struct intel_encoder *encoder,
        else
                frame.avi.colorspace = HDMI_COLORSPACE_RGB;
 
-       drm_hdmi_avi_infoframe_quant_range(&frame.avi, adjusted_mode,
+       drm_hdmi_avi_infoframe_quant_range(&frame.avi,
+                                          conn_state->connector,
+                                          adjusted_mode,
                                           crtc_state->limited_color_range ?
                                           HDMI_QUANTIZATION_RANGE_LIMITED :
-                                          HDMI_QUANTIZATION_RANGE_FULL,
-                                          intel_hdmi->rgb_quant_range_selectable,
-                                          is_hdmi2_sink);
+                                          HDMI_QUANTIZATION_RANGE_FULL);
 
        drm_hdmi_avi_infoframe_content_type(&frame.avi,
                                            conn_state);
@@ -1191,15 +1186,17 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+       intel_wakeref_t wakeref;
        bool ret;
 
-       if (!intel_display_power_get_if_enabled(dev_priv,
-                                               encoder->power_domain))
+       wakeref = intel_display_power_get_if_enabled(dev_priv,
+                                                    encoder->power_domain);
+       if (!wakeref)
                return false;
 
        ret = intel_sdvo_port_enabled(dev_priv, intel_hdmi->hdmi_reg, pipe);
 
-       intel_display_power_put(dev_priv, encoder->power_domain);
+       intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
 
        return ret;
 }
@@ -1591,7 +1588,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
 
        if (hdmi->has_hdmi_sink && !force_dvi) {
                /* if we can't do 8bpc we may still be able to do 12bpc */
-               if (status != MODE_OK && !HAS_GMCH_DISPLAY(dev_priv))
+               if (status != MODE_OK && !HAS_GMCH(dev_priv))
                        status = hdmi_port_clock_valid(hdmi, clock * 3 / 2,
                                                       true, force_dvi);
 
@@ -1616,7 +1613,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
                &crtc_state->base.adjusted_mode;
        int i;
 
-       if (HAS_GMCH_DISPLAY(dev_priv))
+       if (HAS_GMCH(dev_priv))
                return false;
 
        if (bpc == 10 && INTEL_GEN(dev_priv) < 11)
@@ -1707,9 +1704,9 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector,
        return true;
 }
 
-bool intel_hdmi_compute_config(struct intel_encoder *encoder,
-                              struct intel_crtc_state *pipe_config,
-                              struct drm_connector_state *conn_state)
+int intel_hdmi_compute_config(struct intel_encoder *encoder,
+                             struct intel_crtc_state *pipe_config,
+                             struct drm_connector_state *conn_state)
 {
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -1725,7 +1722,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
        bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI;
 
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
-               return false;
+               return -EINVAL;
 
        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink;
@@ -1756,7 +1753,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
                                                &clock_12bpc, &clock_10bpc,
                                                &clock_8bpc)) {
                        DRM_ERROR("Can't support YCBCR420 output\n");
-                       return false;
+                       return -EINVAL;
                }
        }
 
@@ -1806,7 +1803,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
        if (hdmi_port_clock_valid(intel_hdmi, pipe_config->port_clock,
                                  false, force_dvi) != MODE_OK) {
                DRM_DEBUG_KMS("unsupported HDMI clock, rejecting mode\n");
-               return false;
+               return -EINVAL;
        }
 
        /* Set user selected PAR to incoming mode's member */
@@ -1825,7 +1822,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
                }
        }
 
-       return true;
+       return 0;
 }
 
 static void
@@ -1835,7 +1832,6 @@ intel_hdmi_unset_edid(struct drm_connector *connector)
 
        intel_hdmi->has_hdmi_sink = false;
        intel_hdmi->has_audio = false;
-       intel_hdmi->rgb_quant_range_selectable = false;
 
        intel_hdmi->dp_dual_mode.type = DRM_DP_DUAL_MODE_NONE;
        intel_hdmi->dp_dual_mode.max_tmds_clock = 0;
@@ -1896,11 +1892,12 @@ intel_hdmi_set_edid(struct drm_connector *connector)
 {
        struct drm_i915_private *dev_priv = to_i915(connector->dev);
        struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+       intel_wakeref_t wakeref;
        struct edid *edid;
        bool connected = false;
        struct i2c_adapter *i2c;
 
-       intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+       wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
 
        i2c = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
 
@@ -1915,13 +1912,10 @@ intel_hdmi_set_edid(struct drm_connector *connector)
 
        intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
 
-       intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
+       intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
 
        to_intel_connector(connector)->detect_edid = edid;
        if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
-               intel_hdmi->rgb_quant_range_selectable =
-                       drm_rgb_quant_range_selectable(edid);
-
                intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
                intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
 
@@ -1940,11 +1934,12 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
        struct drm_i915_private *dev_priv = to_i915(connector->dev);
        struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
        struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base;
+       intel_wakeref_t wakeref;
 
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                      connector->base.id, connector->name);
 
-       intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+       wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
 
        if (IS_ICELAKE(dev_priv) &&
            !intel_digital_port_connected(encoder))
@@ -1956,7 +1951,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
                status = connector_status_connected;
 
 out:
-       intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
+       intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
 
        if (status != connector_status_connected)
                cec_notifier_phys_addr_invalidate(intel_hdmi->cec_notifier);
@@ -2155,7 +2150,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
        drm_connector_attach_content_type_property(connector);
        connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
 
-       if (!HAS_GMCH_DISPLAY(dev_priv))
+       if (!HAS_GMCH(dev_priv))
                drm_connector_attach_max_bpc_property(connector, 8, 12);
 }
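
The recurring change through this file (and several below) is the power-domain conversion: `intel_display_power_get()` now returns an `intel_wakeref_t` cookie that must be handed back to `intel_display_power_put()`, so unbalanced puts become detectable. A compilable user-space sketch of the cookie pattern, assuming nothing about the real type beyond "zero means no reference was taken":

    #include <stdatomic.h>
    #include <stdint.h>

    /* Illustrative stand-ins; not the i915 definitions. */
    typedef uintptr_t wakeref_t;

    struct power_domain {
            atomic_int count;
    };

    static wakeref_t power_get(struct power_domain *pd)
    {
            atomic_fetch_add_explicit(&pd->count, 1, memory_order_relaxed);
            /* Return a non-zero cookie; a debug build could record the
             * caller here so every put can be paired with its get.
             * (__builtin_return_address is a GCC/Clang builtin.) */
            return (uintptr_t)__builtin_return_address(0) | 1;
    }

    static void power_put(struct power_domain *pd, wakeref_t wf)
    {
            if (wf) /* only release a reference that was actually taken */
                    atomic_fetch_sub_explicit(&pd->count, 1, memory_order_relaxed);
    }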
 
index e24174d08fedb55ca4619e61cb96570deab6bc52..b8937c788f03a58c13b15bda81bfe64d807f03b6 100644 (file)
@@ -23,7 +23,6 @@
 
 #include <linux/kernel.h>
 
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 
 #include "i915_drv.h"
@@ -227,9 +226,10 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
                container_of(work, typeof(*dev_priv),
                             hotplug.reenable_work.work);
        struct drm_device *dev = &dev_priv->drm;
+       intel_wakeref_t wakeref;
        enum hpd_pin pin;
 
-       intel_runtime_pm_get(dev_priv);
+       wakeref = intel_runtime_pm_get(dev_priv);
 
        spin_lock_irq(&dev_priv->irq_lock);
        for_each_hpd_pin(pin) {
@@ -262,7 +262,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
                dev_priv->display.hpd_irq_setup(dev_priv);
        spin_unlock_irq(&dev_priv->irq_lock);
 
-       intel_runtime_pm_put(dev_priv);
+       intel_runtime_pm_put(dev_priv, wakeref);
 }
 
 bool intel_encoder_hotplug(struct intel_encoder *encoder,
@@ -470,7 +470,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
                         * hotplug bits itself. So only WARN about unexpected
                         * interrupts on saner platforms.
                         */
-                       WARN_ONCE(!HAS_GMCH_DISPLAY(dev_priv),
+                       WARN_ONCE(!HAS_GMCH(dev_priv),
                                  "Received HPD interrupt on pin %d although disabled\n", pin);
                        continue;
                }
index bc27b691d8248473abd207138817f10b55971751..9bd1c9002c2ad9095cf6d457308df63ef20787b8 100644 (file)
@@ -115,14 +115,14 @@ fail:
 int intel_huc_check_status(struct intel_huc *huc)
 {
        struct drm_i915_private *dev_priv = huc_to_i915(huc);
-       bool status;
+       intel_wakeref_t wakeref;
+       bool status = false;
 
        if (!HAS_HUC(dev_priv))
                return -ENODEV;
 
-       intel_runtime_pm_get(dev_priv);
-       status = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
-       intel_runtime_pm_put(dev_priv);
+       with_intel_runtime_pm(dev_priv, wakeref)
+               status = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
 
        return status;
 }
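
The `with_intel_runtime_pm()` form used above scopes the wakeref to a single statement. One plausible way to build such a macro from a `for` statement, reusing the hypothetical `power_get()`/`power_put()` helpers from the earlier sketch (illustrative, not the i915 macro):

    /* Runs the body once while the reference is held, releases on exit. */
    #define with_power(pd, wf) \
            for ((wf) = power_get(pd); (wf); power_put((pd), (wf)), (wf) = 0)

    /* Usage mirroring intel_huc_check_status() above: */
    static _Bool read_verified(struct power_domain *pd, const unsigned int *reg)
    {
            wakeref_t wf;
            _Bool status = 0;

            with_power(pd, wf)
                    status = (*reg & 0x1) != 0;     /* e.g. a VERIFIED bit */

            return status;
    }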
index f93d2384d4820ee23245f5d3eeb64fe39fd7aebe..7d7bfc7f7ca70caa0e6374ac3de87522a6d233b0 100644 (file)
@@ -23,8 +23,8 @@
  */
 
 #define BXT_HUC_FW_MAJOR 01
-#define BXT_HUC_FW_MINOR 07
-#define BXT_BLD_NUM 1398
+#define BXT_HUC_FW_MINOR 8
+#define BXT_BLD_NUM 2893
 
 #define SKL_HUC_FW_MAJOR 01
 #define SKL_HUC_FW_MINOR 07
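
A side note on the version bump above: with their leading zeros, `01` and `07` are octal literals in C. That is harmless for digits below 8, but `08` would not even compile, which is presumably why the new minor version is written as a plain `8`:

    int a = 07;     /* octal, == 7 */
    int b = 010;    /* octal, == 8 -- easy to misread as ten */
    /* int c = 08;     invalid: 8 is not an octal digit */
    int d = 8;      /* the unambiguous decimal spelling */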
@@ -76,9 +76,6 @@ static void huc_fw_select(struct intel_uc_fw *huc_fw)
                huc_fw->path = I915_KBL_HUC_UCODE;
                huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR;
                huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR;
-       } else {
-               DRM_WARN("%s: No firmware known for this platform!\n",
-                        intel_uc_fw_type_repr(huc_fw->type));
        }
 }
 
index 802d0394ccc4ad82d92e244cce2109313c84b9a2..5a733e711355bc66eb8f9a77649d1c51df303350 100644 (file)
@@ -29,7 +29,6 @@
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
 #include <linux/export.h>
-#include <drm/drmP.h>
 #include <drm/drm_hdcp.h>
 #include "intel_drv.h"
 #include <drm/i915_drm.h>
@@ -698,12 +697,13 @@ out:
 static int
 gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
 {
-       struct intel_gmbus *bus = container_of(adapter, struct intel_gmbus,
-                                              adapter);
+       struct intel_gmbus *bus =
+               container_of(adapter, struct intel_gmbus, adapter);
        struct drm_i915_private *dev_priv = bus->dev_priv;
+       intel_wakeref_t wakeref;
        int ret;
 
-       intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+       wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
 
        if (bus->force_bit) {
                ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
@@ -715,17 +715,16 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
                        bus->force_bit |= GMBUS_FORCE_BIT_RETRY;
        }
 
-       intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
+       intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
 
        return ret;
 }
 
 int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
 {
-       struct intel_gmbus *bus = container_of(adapter, struct intel_gmbus,
-                                              adapter);
+       struct intel_gmbus *bus =
+               container_of(adapter, struct intel_gmbus, adapter);
        struct drm_i915_private *dev_priv = bus->dev_priv;
-       int ret;
        u8 cmd = DRM_HDCP_DDC_AKSV;
        u8 buf[DRM_HDCP_KSV_LEN] = { 0 };
        struct i2c_msg msgs[] = {
@@ -742,8 +741,10 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
                        .buf = buf,
                }
        };
+       intel_wakeref_t wakeref;
+       int ret;
 
-       intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+       wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
        mutex_lock(&dev_priv->gmbus_mutex);
 
        /*
@@ -754,7 +755,7 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
        ret = do_gmbus_xfer(adapter, msgs, ARRAY_SIZE(msgs), GMBUS_AKSV_SELECT);
 
        mutex_unlock(&dev_priv->gmbus_mutex);
-       intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
+       intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
 
        return ret;
 }
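
Both helpers above recover the enclosing `struct intel_gmbus` from the embedded `struct i2c_adapter` via `container_of()`, which simply subtracts the member's offset from the member's address. A minimal stand-in (the struct layout here is hypothetical, for illustration only):

    #include <stddef.h>     /* offsetof */

    /* Simplified stand-in for the kernel's container_of(). */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct i2c_adapter { int nr; };

    struct intel_gmbus_like {       /* hypothetical layout */
            struct i2c_adapter adapter;
            unsigned int force_bit;
    };

    static struct intel_gmbus_like *bus_from_adapter(struct i2c_adapter *adapter)
    {
            return container_of(adapter, struct intel_gmbus_like, adapter);
    }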
@@ -822,7 +823,7 @@ int intel_setup_gmbus(struct drm_i915_private *dev_priv)
 
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
-       else if (!HAS_GMCH_DISPLAY(dev_priv))
+       else if (!HAS_GMCH(dev_priv))
                /*
                 * Broxton uses the same PCH offsets for South Display Engine,
                 * even though it doesn't have a PCH.
index 5d5336fbe7b05836b7bedc28bffbfef9e6b08b4f..f8239bca38202d33a5ee42de7e0de9a8a853277d 100644 (file)
@@ -65,6 +65,7 @@
 #include <linux/irq.h>
 #include <linux/pci.h>
 #include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
 
 #include "i915_drv.h"
 #include <linux/delay.h>
index d84c7815ee0ca4e400e3342fdc7d7635a08798d2..5e98fd79bd9df4a04950bdae683f8b67e2c4e7dd 100644 (file)
  */
 #include <linux/interrupt.h>
 
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 #include "i915_gem_render_state.h"
+#include "i915_reset.h"
 #include "i915_vgpu.h"
 #include "intel_lrc_reg.h"
 #include "intel_mocs.h"
@@ -172,6 +172,12 @@ static void execlists_init_reg_state(u32 *reg_state,
                                     struct intel_engine_cs *engine,
                                     struct intel_ring *ring);
 
+static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
+{
+       return (i915_ggtt_offset(engine->status_page.vma) +
+               I915_GEM_HWS_INDEX_ADDR);
+}
+
 static inline struct i915_priolist *to_priolist(struct rb_node *rb)
 {
        return rb_entry(rb, struct i915_priolist, node);
@@ -182,13 +188,90 @@ static inline int rq_prio(const struct i915_request *rq)
        return rq->sched.attr.priority;
 }
 
+static int queue_prio(const struct intel_engine_execlists *execlists)
+{
+       struct i915_priolist *p;
+       struct rb_node *rb;
+
+       rb = rb_first_cached(&execlists->queue);
+       if (!rb)
+               return INT_MIN;
+
+       /*
+        * As priolist[] is inverted, with the highest priority in [0],
+        * we have to flip the index value to turn it back into a priority.
+        */
+       p = to_priolist(rb);
+       return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used);
+}
+
 static inline bool need_preempt(const struct intel_engine_cs *engine,
-                               const struct i915_request *last,
-                               int prio)
+                               const struct i915_request *rq)
 {
-       return (intel_engine_has_preemption(engine) &&
-               __execlists_need_preempt(prio, rq_prio(last)) &&
-               !i915_request_completed(last));
+       const int last_prio = rq_prio(rq);
+
+       if (!intel_engine_has_preemption(engine))
+               return false;
+
+       if (i915_request_completed(rq))
+               return false;
+
+       /*
+        * Check if the current priority hint merits a preemption attempt.
+        *
+        * We record the highest-value priority we saw during rescheduling
+        * prior to this dequeue, therefore we know that if it is strictly
+        * less than the priority of the request at the tail of ELSP[0],
+        * we do not need to force a preempt-to-idle cycle.
+        *
+        * However, the priority hint is a mere hint that we may need to
+        * preempt. If that hint is stale or we are trying to preempt
+        * ourselves, ignore the request.
+        */
+       if (!__execlists_need_preempt(engine->execlists.queue_priority_hint,
+                                     last_prio))
+               return false;
+
+       /*
+        * Check against the first request in ELSP[1]; it will, thanks to the
+        * power of PI, be the highest priority of that context.
+        */
+       if (!list_is_last(&rq->link, &engine->timeline.requests) &&
+           rq_prio(list_next_entry(rq, link)) > last_prio)
+               return true;
+
+       /*
+        * If the inflight context did not trigger the preemption, then maybe
+        * it was the set of queued requests? Pick the highest priority in
+        * the queue (the first active priolist) and see if it deserves to be
+        * running instead of ELSP[0].
+        *
+        * The highest priority request in the queue cannot be either
+        * ELSP[0] or ELSP[1] as, thanks again to PI, if it were the same
+        * context, its priority would not exceed ELSP[0] aka last_prio.
+        */
+       return queue_prio(&engine->execlists) > last_prio;
+}
+
+__maybe_unused static inline bool
+assert_priority_queue(const struct intel_engine_execlists *execlists,
+                     const struct i915_request *prev,
+                     const struct i915_request *next)
+{
+       if (!prev)
+               return true;
+
+       /*
+        * Without preemption, prev may refer to the still-active element,
+        * which we refuse to let go.
+        *
+        * Even with preemption, there are times when we think it is better not
+        * to preempt and leave an ostensibly lower priority request in flight.
+        */
+       if (port_request(execlists->port) == prev)
+               return true;
+
+       return rq_prio(prev) >= rq_prio(next);
 }
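
The arithmetic in `queue_prio()` above deserves a worked example: each user priority level owns a small bitmask of internal sub-levels, stored inverted so that index 0 is the highest. A sketch of the conversion, with an assumed shift width (the real `I915_USER_PRIORITY_SHIFT` may differ):

    #include <strings.h>    /* ffs() */

    #define USER_PRIORITY_SHIFT 2   /* assumed sub-priority width */

    static int priolist_prio(int base_priority, unsigned int used)
    {
            /*
             * 'used' has one bit per sub-level, inverted: bit 0 is the
             * highest. ffs() finds that lowest set bit (1-based), so e.g.
             * base 0 with bit 0 set yields (1 << 2) - 1 = 3, the top
             * sub-level of user level 0.
             */
            return ((base_priority + 1) << USER_PRIORITY_SHIFT) - ffs(used);
    }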
 
 /*
@@ -265,7 +348,8 @@ static void unwind_wa_tail(struct i915_request *rq)
        assert_ring_tail_valid(rq->ring, rq->tail);
 }
 
-static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
+static struct i915_request *
+__unwind_incomplete_requests(struct intel_engine_cs *engine)
 {
        struct i915_request *rq, *rn, *active = NULL;
        struct list_head *uninitialized_var(pl);
@@ -303,9 +387,12 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
         */
        if (!(prio & I915_PRIORITY_NEWCLIENT)) {
                prio |= I915_PRIORITY_NEWCLIENT;
+               active->sched.attr.priority = prio;
                list_move_tail(&active->sched.link,
                               i915_sched_lookup_priolist(engine, prio));
        }
+
+       return active;
 }
 
 void
@@ -363,31 +450,12 @@ execlists_context_schedule_out(struct i915_request *rq, unsigned long status)
        trace_i915_request_out(rq);
 }
 
-static void
-execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
-{
-       ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
-       ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
-       ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
-       ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
-}
-
 static u64 execlists_update_context(struct i915_request *rq)
 {
-       struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
        struct intel_context *ce = rq->hw_context;
-       u32 *reg_state = ce->lrc_reg_state;
 
-       reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
-
-       /*
-        * True 32b PPGTT with dynamic page allocation: update PDP
-        * registers and point the unallocated PDPs to scratch page.
-        * PML4 is allocated during ppgtt init, so this is not needed
-        * in 48-bit mode.
-        */
-       if (!i915_vm_is_48bit(&ppgtt->vm))
-               execlists_update_context_pdps(ppgtt, reg_state);
+       ce->lrc_reg_state[CTX_RING_TAIL + 1] =
+               intel_ring_set_tail(rq->ring, rq->tail);
 
        /*
         * Make sure the context image is complete before we submit it to HW.
@@ -455,11 +523,12 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
                        desc = execlists_update_context(rq);
                        GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
 
-                       GEM_TRACE("%s in[%d]:  ctx=%d.%d, global=%d (fence %llx:%lld) (current %d), prio=%d\n",
+                       GEM_TRACE("%s in[%d]:  ctx=%d.%d, global=%d (fence %llx:%lld) (current %d:%d), prio=%d\n",
                                  engine->name, n,
                                  port[n].context_id, count,
                                  rq->global_seqno,
                                  rq->fence.context, rq->fence.seqno,
+                                 hwsp_seqno(rq),
                                  intel_engine_get_seqno(engine),
                                  rq_prio(rq));
                } else {
@@ -531,6 +600,8 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
 
        execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
        execlists_set_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
+
+       (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
 }
 
 static void complete_preempt_context(struct intel_engine_execlists *execlists)
@@ -599,7 +670,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
                        return;
 
-               if (need_preempt(engine, last, execlists->queue_priority)) {
+               if (need_preempt(engine, last)) {
                        inject_preempt_context(engine);
                        return;
                }
@@ -632,7 +703,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                 * WaIdleLiteRestore:bdw,skl
                 * Apply the wa NOOPs to prevent
                 * ring:HEAD == rq:TAIL as we resubmit the
-                * request. See gen8_emit_breadcrumb() for
+                * request. See gen8_emit_fini_breadcrumb() for
                 * where we prepare the padding after the
                 * end of the request.
                 */
@@ -645,6 +716,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                int i;
 
                priolist_for_each_request_consume(rq, rn, p, i) {
+                       GEM_BUG_ON(!assert_priority_queue(execlists, last, rq));
+
                        /*
                         * Can we combine this request with the current port?
                         * It has to be the same context/ringbuffer and not
@@ -704,20 +777,20 @@ done:
        /*
         * Here be a bit of magic! Or sleight-of-hand, whichever you prefer.
         *
-        * We choose queue_priority such that if we add a request of greater
+        * We choose the priority hint such that if we add a request of greater
         * priority than this, we kick the submission tasklet to decide on
         * the right order of submitting the requests to hardware. We must
         * also be prepared to reorder requests as they are in-flight on the
-        * HW. We derive the queue_priority then as the first "hole" in
+        * HW. We derive the priority hint then as the first "hole" in
         * the HW submission ports and if there are no available slots,
         * the priority of the lowest executing request, i.e. last.
         *
         * When we do receive a higher priority request ready to run from the
-        * user, see queue_request(), the queue_priority is bumped to that
+        * user, see queue_request(), the priority hint is bumped to that
         * request triggering preemption on the next dequeue (or subsequent
         * interrupt for secondary ports).
         */
-       execlists->queue_priority =
+       execlists->queue_priority_hint =
                port != execlists->port ? rq_prio(last) : INT_MIN;
 
        if (submit) {
@@ -748,11 +821,12 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
        while (num_ports-- && port_isset(port)) {
                struct i915_request *rq = port_request(port);
 
-               GEM_TRACE("%s:port%u global=%d (fence %llx:%lld), (current %d)\n",
+               GEM_TRACE("%s:port%u global=%d (fence %llx:%lld), (current %d:%d)\n",
                          rq->engine->name,
                          (unsigned int)(port - execlists->port),
                          rq->global_seqno,
                          rq->fence.context, rq->fence.seqno,
+                         hwsp_seqno(rq),
                          intel_engine_get_seqno(rq->engine));
 
                GEM_BUG_ON(!execlists->active);
@@ -770,6 +844,13 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
        execlists_clear_all_active(execlists);
 }
 
+static inline void
+invalidate_csb_entries(const u32 *first, const u32 *last)
+{
+       clflush((void *)first);
+       clflush((void *)last);
+}
+
 static void reset_csb_pointers(struct intel_engine_execlists *execlists)
 {
        const unsigned int reset_value = GEN8_CSB_ENTRIES - 1;
@@ -785,6 +866,9 @@ static void reset_csb_pointers(struct intel_engine_execlists *execlists)
         */
        execlists->csb_head = reset_value;
        WRITE_ONCE(*execlists->csb_write, reset_value);
+
+       invalidate_csb_entries(&execlists->csb_status[0],
+                              &execlists->csb_status[GEN8_CSB_ENTRIES - 1]);
 }
 
 static void nop_submission_tasklet(unsigned long data)
@@ -826,10 +910,10 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
        list_for_each_entry(rq, &engine->timeline.requests, link) {
                GEM_BUG_ON(!rq->global_seqno);
 
-               if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
-                       continue;
+               if (!i915_request_signaled(rq))
+                       dma_fence_set_error(&rq->fence, -EIO);
 
-               dma_fence_set_error(&rq->fence, -EIO);
+               i915_request_mark_complete(rq);
        }
 
        /* Flush the queued requests to the timeline list (for retiring). */
@@ -839,9 +923,9 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 
                priolist_for_each_request_consume(rq, rn, p, i) {
                        list_del_init(&rq->sched.link);
-
-                       dma_fence_set_error(&rq->fence, -EIO);
                        __i915_request_submit(rq);
+                       dma_fence_set_error(&rq->fence, -EIO);
+                       i915_request_mark_complete(rq);
                }
 
                rb_erase_cached(&p->node, &execlists->queue);
@@ -855,7 +939,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 
        /* Remaining _unready_ requests will be nop'ed when submitted */
 
-       execlists->queue_priority = INT_MIN;
+       execlists->queue_priority_hint = INT_MIN;
        execlists->queue = RB_ROOT_CACHED;
        GEM_BUG_ON(port_isset(execlists->port));
 
@@ -878,6 +962,8 @@ static void process_csb(struct intel_engine_cs *engine)
        const u32 * const buf = execlists->csb_status;
        u8 head, tail;
 
+       lockdep_assert_held(&engine->timeline.lock);
+
        /*
         * Note that csb_write, csb_status may be either in HWSP or mmio.
         * When reading from the csb_write mmio register, we have to be
@@ -966,12 +1052,13 @@ static void process_csb(struct intel_engine_cs *engine)
                                                EXECLISTS_ACTIVE_USER));
 
                rq = port_unpack(port, &count);
-               GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%lld) (current %d), prio=%d\n",
+               GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%lld) (current %d:%d), prio=%d\n",
                          engine->name,
                          port->context_id, count,
                          rq ? rq->global_seqno : 0,
                          rq ? rq->fence.context : 0,
                          rq ? rq->fence.seqno : 0,
+                         rq ? hwsp_seqno(rq) : 0,
                          intel_engine_get_seqno(engine),
                          rq ? rq_prio(rq) : 0);
 
@@ -1020,6 +1107,19 @@ static void process_csb(struct intel_engine_cs *engine)
        } while (head != tail);
 
        execlists->csb_head = head;
+
+       /*
+        * Gen11 has proven to fail with respect to the global observation
+        * point between entry and tail update, failing on the ordering,
+        * and thus we can see a stale entry in the context status buffer.
+        *
+        * Forcibly evict the entries before the next GPU CSB update, to
+        * increase the odds that we read fresh entries from misbehaving
+        * hardware. The cost of doing so mostly comes out in the wash, as
+        * the hardware, working or not, will need to do the invalidation
+        * anyway.
+        */
+       invalidate_csb_entries(&buf[0], &buf[GEN8_CSB_ENTRIES - 1]);
 }
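
For readers unfamiliar with the loop above: the context status buffer is a small circular array that the GPU writes and `process_csb()` drains, advancing a software `head` toward the hardware's `tail` with wrap-around. A stripped-down sketch of that consumption pattern (entry count and handler are illustrative):

    #define CSB_ENTRIES 6   /* GEN8_CSB_ENTRIES in the real driver */

    static void drain_csb(const unsigned int *buf, unsigned int *head,
                          unsigned int tail,
                          void (*handle)(unsigned int status))
    {
            while (*head != tail) {
                    if (++*head == CSB_ENTRIES)     /* wrap like the HW does */
                            *head = 0;
                    handle(buf[*head]);             /* consume one event */
            }
    }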
 
 static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
@@ -1042,7 +1142,7 @@ static void execlists_submission_tasklet(unsigned long data)
 
        GEM_TRACE("%s awake?=%d, active=%x\n",
                  engine->name,
-                 engine->i915->gt.awake,
+                 !!engine->i915->gt.awake,
                  engine->execlists.active);
 
        spin_lock_irqsave(&engine->timeline.lock, flags);
@@ -1072,8 +1172,8 @@ static void __submit_queue_imm(struct intel_engine_cs *engine)
 
 static void submit_queue(struct intel_engine_cs *engine, int prio)
 {
-       if (prio > engine->execlists.queue_priority) {
-               engine->execlists.queue_priority = prio;
+       if (prio > engine->execlists.queue_priority_hint) {
+               engine->execlists.queue_priority_hint = prio;
                __submit_queue_imm(engine);
        }
 }
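
The rename from `queue_priority` to `queue_priority_hint` matches the logic above: the hint only gates when the tasklet is kicked, so a stale hint costs at most a spurious preemption check rather than a missed submission. A tiny sketch of the gate, with `INT_MIN` as the "nothing queued" sentinel as in `execlists_cancel_requests()`:

    #include <limits.h>

    struct sched_hint {
            int queue_priority_hint;        /* INT_MIN when the queue is idle */
    };

    static void kick_if_raised(struct sched_hint *s, int prio,
                               void (*kick)(struct sched_hint *))
    {
            /* Only poke the submission path when the new request could
             * actually change its decision. */
            if (prio > s->queue_priority_hint) {
                    s->queue_priority_hint = prio;
                    kick(s);
            }
    }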
@@ -1166,6 +1266,23 @@ static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
        return i915_vma_pin(vma, 0, 0, flags);
 }
 
+static void
+__execlists_update_reg_state(struct intel_engine_cs *engine,
+                            struct intel_context *ce)
+{
+       u32 *regs = ce->lrc_reg_state;
+       struct intel_ring *ring = ce->ring;
+
+       regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(ring->vma);
+       regs[CTX_RING_HEAD + 1] = ring->head;
+       regs[CTX_RING_TAIL + 1] = ring->tail;
+
+       /* RPCS */
+       if (engine->class == RENDER_CLASS)
+               regs[CTX_R_PWR_CLK_STATE + 1] = gen8_make_rpcs(engine->i915,
+                                                              &ce->sseu);
+}
+
 static struct intel_context *
 __execlists_context_pin(struct intel_engine_cs *engine,
                        struct i915_gem_context *ctx,
@@ -1204,10 +1321,8 @@ __execlists_context_pin(struct intel_engine_cs *engine,
        GEM_BUG_ON(!intel_ring_offset_valid(ce->ring, ce->ring->head));
 
        ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
-       ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
-               i915_ggtt_offset(ce->ring->vma);
-       ce->lrc_reg_state[CTX_RING_HEAD + 1] = ce->ring->head;
-       ce->lrc_reg_state[CTX_RING_TAIL + 1] = ce->ring->tail;
+
+       __execlists_update_reg_state(engine, ce);
 
        ce->state->obj->pin_global++;
        i915_gem_context_get(ctx);
@@ -1247,29 +1362,116 @@ execlists_context_pin(struct intel_engine_cs *engine,
        return __execlists_context_pin(engine, ctx, ce);
 }
 
+static int gen8_emit_init_breadcrumb(struct i915_request *rq)
+{
+       u32 *cs;
+
+       GEM_BUG_ON(!rq->timeline->has_initial_breadcrumb);
+
+       cs = intel_ring_begin(rq, 6);
+       if (IS_ERR(cs))
+               return PTR_ERR(cs);
+
+       /*
+        * Check if we have been preempted before we even get started.
+        *
+        * After this point i915_request_started() reports true, even if
+        * we get preempted and so are no longer running.
+        */
+       *cs++ = MI_ARB_CHECK;
+       *cs++ = MI_NOOP;
+
+       *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+       *cs++ = rq->timeline->hwsp_offset;
+       *cs++ = 0;
+       *cs++ = rq->fence.seqno - 1;
+
+       intel_ring_advance(rq, cs);
+       return 0;
+}
+
+static int emit_pdps(struct i915_request *rq)
+{
+       const struct intel_engine_cs * const engine = rq->engine;
+       struct i915_hw_ppgtt * const ppgtt = rq->gem_context->ppgtt;
+       int err, i;
+       u32 *cs;
+
+       GEM_BUG_ON(intel_vgpu_active(rq->i915));
+
+       /*
+        * Beware ye of the dragons, this sequence is magic!
+        *
+        * Small changes to this sequence can cause anything from
+        * GPU hangs to forcewake errors and machine lockups!
+        */
+
+       /* Flush any residual operations from the context load */
+       err = engine->emit_flush(rq, EMIT_FLUSH);
+       if (err)
+               return err;
+
+       /* Magic required to prevent forcewake errors! */
+       err = engine->emit_flush(rq, EMIT_INVALIDATE);
+       if (err)
+               return err;
+
+       cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
+       if (IS_ERR(cs))
+               return PTR_ERR(cs);
+
+       /* Ensure the LRI have landed before we invalidate & continue */
+       *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
+       for (i = GEN8_3LVL_PDPES; i--; ) {
+               const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+               *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
+               *cs++ = upper_32_bits(pd_daddr);
+               *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
+               *cs++ = lower_32_bits(pd_daddr);
+       }
+       *cs++ = MI_NOOP;
+
+       intel_ring_advance(rq, cs);
+
+       /* Be doubly sure the LRI have landed before proceeding */
+       err = engine->emit_flush(rq, EMIT_FLUSH);
+       if (err)
+               return err;
+
+       /* Re-invalidate the TLB for luck */
+       return engine->emit_flush(rq, EMIT_INVALIDATE);
+}
+
 static int execlists_request_alloc(struct i915_request *request)
 {
        int ret;
 
        GEM_BUG_ON(!request->hw_context->pin_count);
 
-       /* Flush enough space to reduce the likelihood of waiting after
+       /*
+        * Flush enough space to reduce the likelihood of waiting after
         * we start building the request - in which case we will just
         * have to repeat work.
         */
        request->reserved_space += EXECLISTS_REQUEST_SIZE;
 
-       ret = intel_ring_wait_for_space(request->ring, request->reserved_space);
-       if (ret)
-               return ret;
-
-       /* Note that after this point, we have committed to using
+       /*
+        * Note that after this point, we have committed to using
         * this request as it is being used to both track the
         * state of engine initialisation and liveness of the
         * golden renderstate above. Think twice before you try
         * to cancel/unwind this request now.
         */
 
+       /* Unconditionally invalidate GPU caches and TLBs. */
+       if (i915_vm_is_48bit(&request->gem_context->ppgtt->vm))
+               ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
+       else
+               ret = emit_pdps(request);
+       if (ret)
+               return ret;
+
        request->reserved_space -= EXECLISTS_REQUEST_SIZE;
        return 0;
 }
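
The initial breadcrumb added above stores `rq->fence.seqno - 1` into the request's timeline slot of the status page before the payload runs, so "has this request started?" reduces to a seqno comparison against the HWSP that is robust to wrap-around. A hedged sketch of that test (helper names are not the i915 ones):

    #include <stdbool.h>
    #include <stdint.h>

    /* True if 'a' is at or after 'b' in wrap-around seqno space. */
    static bool seqno_passed(uint32_t a, uint32_t b)
    {
            return (int32_t)(a - b) >= 0;
    }

    static bool request_started(uint32_t hwsp_value, uint32_t fence_seqno)
    {
            /*
             * The init breadcrumb wrote fence_seqno - 1, so observing it
             * (or anything later) means the CS has begun this request.
             */
            return seqno_passed(hwsp_value, fence_seqno - 1);
    }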
@@ -1592,7 +1794,7 @@ static void enable_execlists(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
 
-       I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
+       intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
 
        /*
         * Make sure we're not enabling the new 12-deep CSB
@@ -1613,7 +1815,7 @@ static void enable_execlists(struct intel_engine_cs *engine)
                   _MASKED_BIT_DISABLE(STOP_RING));
 
        I915_WRITE(RING_HWS_PGA(engine->mmio_base),
-                  engine->status_page.ggtt_offset);
+                  i915_ggtt_offset(engine->status_page.vma));
        POSTING_READ(RING_HWS_PGA(engine->mmio_base));
 }
 
@@ -1633,6 +1835,7 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
 static int gen8_init_common_ring(struct intel_engine_cs *engine)
 {
        intel_engine_apply_workarounds(engine);
+       intel_engine_apply_whitelist(engine);
 
        intel_mocs_init_engine(engine);
 
@@ -1649,48 +1852,9 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
        return 0;
 }
 
-static int gen8_init_render_ring(struct intel_engine_cs *engine)
-{
-       struct drm_i915_private *dev_priv = engine->i915;
-       int ret;
-
-       ret = gen8_init_common_ring(engine);
-       if (ret)
-               return ret;
-
-       intel_engine_apply_whitelist(engine);
-
-       /* We need to disable the AsyncFlip performance optimisations in order
-        * to use MI_WAIT_FOR_EVENT within the CS. It should already be
-        * programmed to '1' on all products.
-        *
-        * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
-        */
-       I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
-
-       I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
-
-       return 0;
-}
-
-static int gen9_init_render_ring(struct intel_engine_cs *engine)
-{
-       int ret;
-
-       ret = gen8_init_common_ring(engine);
-       if (ret)
-               return ret;
-
-       intel_engine_apply_whitelist(engine);
-
-       return 0;
-}
-
-static struct i915_request *
-execlists_reset_prepare(struct intel_engine_cs *engine)
+static void execlists_reset_prepare(struct intel_engine_cs *engine)
 {
        struct intel_engine_execlists * const execlists = &engine->execlists;
-       struct i915_request *request, *active;
        unsigned long flags;
 
        GEM_TRACE("%s: depth<-%d\n", engine->name,
@@ -1706,59 +1870,21 @@ execlists_reset_prepare(struct intel_engine_cs *engine)
         * prevents the race.
         */
        __tasklet_disable_sync_once(&execlists->tasklet);
+       GEM_BUG_ON(!reset_in_progress(execlists));
 
+       /* And flush any current direct submission. */
        spin_lock_irqsave(&engine->timeline.lock, flags);
-
-       /*
-        * We want to flush the pending context switches, having disabled
-        * the tasklet above, we can assume exclusive access to the execlists.
-        * For this allows us to catch up with an inflight preemption event,
-        * and avoid blaming an innocent request if the stall was due to the
-        * preemption itself.
-        */
-       process_csb(engine);
-
-       /*
-        * The last active request can then be no later than the last request
-        * now in ELSP[0]. So search backwards from there, so that if the GPU
-        * has advanced beyond the last CSB update, it will be pardoned.
-        */
-       active = NULL;
-       request = port_request(execlists->port);
-       if (request) {
-               /*
-                * Prevent the breadcrumb from advancing before we decide
-                * which request is currently active.
-                */
-               intel_engine_stop_cs(engine);
-
-               list_for_each_entry_from_reverse(request,
-                                                &engine->timeline.requests,
-                                                link) {
-                       if (__i915_request_completed(request,
-                                                    request->global_seqno))
-                               break;
-
-                       active = request;
-               }
-       }
-
+       process_csb(engine); /* drain preemption events */
        spin_unlock_irqrestore(&engine->timeline.lock, flags);
-
-       return active;
 }
 
-static void execlists_reset(struct intel_engine_cs *engine,
-                           struct i915_request *request)
+static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
 {
        struct intel_engine_execlists * const execlists = &engine->execlists;
+       struct i915_request *rq;
        unsigned long flags;
        u32 *regs;
 
-       GEM_TRACE("%s request global=%d, current=%d\n",
-                 engine->name, request ? request->global_seqno : 0,
-                 intel_engine_get_seqno(engine));
-
        spin_lock_irqsave(&engine->timeline.lock, flags);
 
        /*
@@ -1773,12 +1899,18 @@ static void execlists_reset(struct intel_engine_cs *engine,
        execlists_cancel_port_requests(execlists);
 
        /* Push back any incomplete requests for replay after the reset. */
-       __unwind_incomplete_requests(engine);
+       rq = __unwind_incomplete_requests(engine);
 
        /* Following the reset, we need to reload the CSB read/write pointers */
        reset_csb_pointers(&engine->execlists);
 
-       spin_unlock_irqrestore(&engine->timeline.lock, flags);
+       GEM_TRACE("%s seqno=%d, current=%d, stalled? %s\n",
+                 engine->name,
+                 rq ? rq->global_seqno : 0,
+                 intel_engine_get_seqno(engine),
+                 yesno(stalled));
+       if (!rq)
+               goto out_unlock;
 
        /*
         * If the request was innocent, we leave the request in the ELSP
@@ -1791,8 +1923,9 @@ static void execlists_reset(struct intel_engine_cs *engine,
         * and have to at least restore the RING register in the context
         * image back to the expected values to skip over the guilty request.
         */
-       if (!request || request->fence.error != -EIO)
-               return;
+       i915_reset_request(rq, stalled);
+       if (!stalled)
+               goto out_unlock;
 
        /*
         * We want a simple context + ring to execute the breadcrumb update.
@@ -1802,25 +1935,22 @@ static void execlists_reset(struct intel_engine_cs *engine,
         * future request will be after userspace has had the opportunity
         * to recreate its own state.
         */
-       regs = request->hw_context->lrc_reg_state;
+       regs = rq->hw_context->lrc_reg_state;
        if (engine->pinned_default_state) {
                memcpy(regs, /* skip restoring the vanilla PPHWSP */
                       engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
                       engine->context_size - PAGE_SIZE);
        }
-       execlists_init_reg_state(regs,
-                                request->gem_context, engine, request->ring);
 
        /* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
-       regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(request->ring->vma);
+       rq->ring->head = intel_ring_wrap(rq->ring, rq->postfix);
+       intel_ring_update_space(rq->ring);
 
-       request->ring->head = intel_ring_wrap(request->ring, request->postfix);
-       regs[CTX_RING_HEAD + 1] = request->ring->head;
+       execlists_init_reg_state(regs, rq->gem_context, engine, rq->ring);
+       __execlists_update_reg_state(engine, rq->hw_context);
 
-       intel_ring_update_space(request->ring);
-
-       /* Reset WaIdleLiteRestore:bdw,skl as well */
-       unwind_wa_tail(request);
+out_unlock:
+       spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
 
 static void execlists_reset_finish(struct intel_engine_cs *engine)
@@ -1833,6 +1963,7 @@ static void execlists_reset_finish(struct intel_engine_cs *engine)
         * to sleep before we restart and reload a context.
         *
         */
+       GEM_BUG_ON(!reset_in_progress(execlists));
        if (!RB_EMPTY_ROOT(&execlists->queue.rb_root))
                execlists->tasklet.func(execlists->tasklet.data);
 
@@ -1841,56 +1972,11 @@ static void execlists_reset_finish(struct intel_engine_cs *engine)
                  atomic_read(&execlists->tasklet.count));
 }
 
-static int intel_logical_ring_emit_pdps(struct i915_request *rq)
-{
-       struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
-       struct intel_engine_cs *engine = rq->engine;
-       const int num_lri_cmds = GEN8_3LVL_PDPES * 2;
-       u32 *cs;
-       int i;
-
-       cs = intel_ring_begin(rq, num_lri_cmds * 2 + 2);
-       if (IS_ERR(cs))
-               return PTR_ERR(cs);
-
-       *cs++ = MI_LOAD_REGISTER_IMM(num_lri_cmds);
-       for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
-               const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
-
-               *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
-               *cs++ = upper_32_bits(pd_daddr);
-               *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
-               *cs++ = lower_32_bits(pd_daddr);
-       }
-
-       *cs++ = MI_NOOP;
-       intel_ring_advance(rq, cs);
-
-       return 0;
-}
-
 static int gen8_emit_bb_start(struct i915_request *rq,
                              u64 offset, u32 len,
                              const unsigned int flags)
 {
        u32 *cs;
-       int ret;
-
-       /* Don't rely in hw updating PDPs, specially in lite-restore.
-        * Ideally, we should set Force PD Restore in ctx descriptor,
-        * but we can't. Force Restore would be a second option, but
-        * it is unsafe in case of lite-restore (because the ctx is
-        * not idle). PML4 is allocated during ppgtt init so this is
-        * not needed in 48-bit.*/
-       if ((intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) &&
-           !i915_vm_is_48bit(&rq->gem_context->ppgtt->vm) &&
-           !intel_vgpu_active(rq->i915)) {
-               ret = intel_logical_ring_emit_pdps(rq);
-               if (ret)
-                       return ret;
-
-               rq->gem_context->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine);
-       }
 
        cs = intel_ring_begin(rq, 6);
        if (IS_ERR(cs))
@@ -1923,6 +2009,7 @@ static int gen8_emit_bb_start(struct i915_request *rq,
 
        *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
        *cs++ = MI_NOOP;
+
        intel_ring_advance(rq, cs);
 
        return 0;
@@ -2007,7 +2094,7 @@ static int gen8_emit_flush_render(struct i915_request *request,
                 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
                 * pipe control.
                 */
-               if (IS_GEN9(request->i915))
+               if (IS_GEN(request->i915, 9))
                        vf_flush_wa = true;
 
                /* WaForGAMHang:kbl */
@@ -2049,45 +2136,62 @@ static int gen8_emit_flush_render(struct i915_request *request,
  * used as a workaround for not being allowed to do lite
  * restore with HEAD==TAIL (WaIdleLiteRestore).
  */
-static void gen8_emit_wa_tail(struct i915_request *request, u32 *cs)
+static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs)
 {
        /* Ensure there's always at least one preemption point per-request. */
        *cs++ = MI_ARB_CHECK;
        *cs++ = MI_NOOP;
        request->wa_tail = intel_ring_offset(request, cs);
+
+       return cs;
 }
 
-static void gen8_emit_breadcrumb(struct i915_request *request, u32 *cs)
+static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
 {
        /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
        BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
 
-       cs = gen8_emit_ggtt_write(cs, request->global_seqno,
+       cs = gen8_emit_ggtt_write(cs,
+                                 request->fence.seqno,
+                                 request->timeline->hwsp_offset);
+
+       cs = gen8_emit_ggtt_write(cs,
+                                 request->global_seqno,
                                  intel_hws_seqno_address(request->engine));
+
        *cs++ = MI_USER_INTERRUPT;
        *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
        request->tail = intel_ring_offset(request, cs);
        assert_ring_tail_valid(request->ring, request->tail);
 
-       gen8_emit_wa_tail(request, cs);
+       return gen8_emit_wa_tail(request, cs);
 }
-static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS;
 
-static void gen8_emit_breadcrumb_rcs(struct i915_request *request, u32 *cs)
+static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
 {
-       /* We're using qword write, seqno should be aligned to 8 bytes. */
-       BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
+       cs = gen8_emit_ggtt_write_rcs(cs,
+                                     request->fence.seqno,
+                                     request->timeline->hwsp_offset,
+                                     PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+                                     PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+                                     PIPE_CONTROL_DC_FLUSH_ENABLE |
+                                     PIPE_CONTROL_FLUSH_ENABLE |
+                                     PIPE_CONTROL_CS_STALL);
+
+       cs = gen8_emit_ggtt_write_rcs(cs,
+                                     request->global_seqno,
+                                     intel_hws_seqno_address(request->engine),
+                                     PIPE_CONTROL_CS_STALL);
 
-       cs = gen8_emit_ggtt_write_rcs(cs, request->global_seqno,
-                                     intel_hws_seqno_address(request->engine));
        *cs++ = MI_USER_INTERRUPT;
        *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
        request->tail = intel_ring_offset(request, cs);
        assert_ring_tail_valid(request->ring, request->tail);
 
-       gen8_emit_wa_tail(request, cs);
+       return gen8_emit_wa_tail(request, cs);
 }
-static const int gen8_emit_breadcrumb_rcs_sz = 8 + WA_TAIL_DWORDS;
 
 static int gen8_init_rcs_context(struct i915_request *rq)
 {
@@ -2179,8 +2283,8 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
        engine->request_alloc = execlists_request_alloc;
 
        engine->emit_flush = gen8_emit_flush;
-       engine->emit_breadcrumb = gen8_emit_breadcrumb;
-       engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_sz;
+       engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
+       engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb;
 
        engine->set_default_submission = intel_execlists_set_default_submission;
 
@@ -2219,10 +2323,14 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
        engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
 }
 
-static void
+static int
 logical_ring_setup(struct intel_engine_cs *engine)
 {
-       intel_engine_setup_common(engine);
+       int err;
+
+       err = intel_engine_setup_common(engine);
+       if (err)
+               return err;
 
        /* Intentionally left blank. */
        engine->buffer = NULL;
@@ -2232,6 +2340,8 @@ logical_ring_setup(struct intel_engine_cs *engine)
 
        logical_ring_default_vfuncs(engine);
        logical_ring_default_irqs(engine);
+
+       return 0;
 }
 
 static int logical_ring_init(struct intel_engine_cs *engine)
@@ -2244,6 +2354,8 @@ static int logical_ring_init(struct intel_engine_cs *engine)
        if (ret)
                return ret;
 
+       intel_engine_init_workarounds(engine);
+
        if (HAS_LOGICAL_RING_ELSQ(i915)) {
                execlists->submit_reg = i915->regs +
                        i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine));
@@ -2264,10 +2376,10 @@ static int logical_ring_init(struct intel_engine_cs *engine)
        }
 
        execlists->csb_status =
-               &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
+               &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
 
        execlists->csb_write =
-               &engine->status_page.page_addr[intel_hws_csb_write_index(i915)];
+               &engine->status_page.addr[intel_hws_csb_write_index(i915)];
 
        reset_csb_pointers(execlists);
 
@@ -2276,23 +2388,16 @@ static int logical_ring_init(struct intel_engine_cs *engine)
 
 int logical_render_ring_init(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = engine->i915;
        int ret;
 
-       logical_ring_setup(engine);
-
-       if (HAS_L3_DPF(dev_priv))
-               engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+       ret = logical_ring_setup(engine);
+       if (ret)
+               return ret;
 
        /* Override some for render ring. */
-       if (INTEL_GEN(dev_priv) >= 9)
-               engine->init_hw = gen9_init_render_ring;
-       else
-               engine->init_hw = gen8_init_render_ring;
        engine->init_context = gen8_init_rcs_context;
        engine->emit_flush = gen8_emit_flush_render;
-       engine->emit_breadcrumb = gen8_emit_breadcrumb_rcs;
-       engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_rcs_sz;
+       engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
 
        ret = logical_ring_init(engine);
        if (ret)
@@ -2310,33 +2415,64 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
        }
 
        intel_engine_init_whitelist(engine);
-       intel_engine_init_workarounds(engine);
 
        return 0;
 }
 
 int logical_xcs_ring_init(struct intel_engine_cs *engine)
 {
-       logical_ring_setup(engine);
+       int err;
+
+       err = logical_ring_setup(engine);
+       if (err)
+               return err;
 
        return logical_ring_init(engine);
 }
 
-static u32
-make_rpcs(struct drm_i915_private *dev_priv)
+u32 gen8_make_rpcs(struct drm_i915_private *i915, struct intel_sseu *req_sseu)
 {
-       bool subslice_pg = INTEL_INFO(dev_priv)->sseu.has_subslice_pg;
-       u8 slices = hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask);
-       u8 subslices = hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask[0]);
+       const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
+       bool subslice_pg = sseu->has_subslice_pg;
+       struct intel_sseu ctx_sseu;
+       u8 slices, subslices;
        u32 rpcs = 0;
 
        /*
         * No explicit RPCS request is needed to ensure full
         * slice/subslice/EU enablement prior to Gen9.
        */
-       if (INTEL_GEN(dev_priv) < 9)
+       if (INTEL_GEN(i915) < 9)
                return 0;
 
+       /*
+        * If i915/perf is active, we want a stable powergating configuration
+        * on the system.
+        *
+        * We could choose full enablement, but on ICL we know there are use
+        * cases which disable slices for functional reasons, apart from
+        * performance ones. So in this case we select a known stable subset.
+        */
+       if (!i915->perf.oa.exclusive_stream) {
+               ctx_sseu = *req_sseu;
+       } else {
+               ctx_sseu = intel_device_default_sseu(i915);
+
+               if (IS_GEN(i915, 11)) {
+                       /*
+                        * We only need the subslice count, so it doesn't
+                        * matter which ones we select - just keep the low
+                        * bits, amounting to half of all available
+                        * subslices per slice.
+                        */
+                       ctx_sseu.subslice_mask =
+                               ~(~0 << (hweight8(ctx_sseu.subslice_mask) / 2));
+                       ctx_sseu.slice_mask = 0x1;
+               }
+       }
+
+       slices = hweight8(ctx_sseu.slice_mask);
+       subslices = hweight8(ctx_sseu.subslice_mask);
+
        /*
         * Since the SScount bitfield in GEN8_R_PWR_CLK_STATE is only three bits
         * wide and Icelake has up to eight subslices, special programming is
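
Worked through, the mask trick in the Gen11 branch of `gen8_make_rpcs()` above: with, say, all eight subslices present, `hweight8()` is 8, half of that is 4, and `~(~0 << 4)` is `0x0f`, keeping the four low subslices. A compilable illustration (the popcount builtin stands in for `hweight8()`):

    #include <stdio.h>

    int main(void)
    {
            unsigned int subslice_mask = 0xff;      /* 8 subslices present */
            unsigned int half = __builtin_popcount(subslice_mask) / 2;

            /* Keep only the low 'half' bits: ~(~0u << 4) == 0x0f. */
            subslice_mask = ~(~0u << half);

            printf("halved mask: %#x\n", subslice_mask);    /* prints 0xf */
            return 0;
    }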
@@ -2362,7 +2498,9 @@ make_rpcs(struct drm_i915_private *dev_priv)
         * subslices are enabled, or a count between one and four on the first
         * slice.
         */
-       if (IS_GEN11(dev_priv) && slices == 1 && subslices >= 4) {
+       if (IS_GEN(i915, 11) &&
+           slices == 1 &&
+           subslices > min_t(u8, 4, hweight8(sseu->subslice_mask[0]) / 2)) {
                GEM_BUG_ON(subslices & 1);
 
                subslice_pg = false;
@@ -2375,10 +2513,10 @@ make_rpcs(struct drm_i915_private *dev_priv)
         * must make an explicit request through RPCS for full
         * enablement.
        */
-       if (INTEL_INFO(dev_priv)->sseu.has_slice_pg) {
+       if (sseu->has_slice_pg) {
                u32 mask, val = slices;
 
-               if (INTEL_GEN(dev_priv) >= 11) {
+               if (INTEL_GEN(i915) >= 11) {
                        mask = GEN11_RPCS_S_CNT_MASK;
                        val <<= GEN11_RPCS_S_CNT_SHIFT;
                } else {
@@ -2403,18 +2541,16 @@ make_rpcs(struct drm_i915_private *dev_priv)
                rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_SS_CNT_ENABLE | val;
        }
 
-       if (INTEL_INFO(dev_priv)->sseu.has_eu_pg) {
+       if (sseu->has_eu_pg) {
                u32 val;
 
-               val = INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
-                     GEN8_RPCS_EU_MIN_SHIFT;
+               val = ctx_sseu.min_eus_per_subslice << GEN8_RPCS_EU_MIN_SHIFT;
                GEM_BUG_ON(val & ~GEN8_RPCS_EU_MIN_MASK);
                val &= GEN8_RPCS_EU_MIN_MASK;
 
                rpcs |= val;
 
-               val = INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
-                     GEN8_RPCS_EU_MAX_SHIFT;
+               val = ctx_sseu.max_eus_per_subslice << GEN8_RPCS_EU_MAX_SHIFT;
                GEM_BUG_ON(val & ~GEN8_RPCS_EU_MAX_MASK);
                val &= GEN8_RPCS_EU_MAX_MASK;
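
Each RPCS field in this function follows the same shift, check, mask pattern. A simplified standalone sketch with assert() standing in for GEM_BUG_ON(); the field offsets are illustrative, not the real GEN8_RPCS_* values:

#include <assert.h>
#include <stdint.h>

/* Illustrative field layout: 4-bit min and max EU counts. */
#define EU_MIN_SHIFT 0
#define EU_MIN_MASK  (0xfu << EU_MIN_SHIFT)
#define EU_MAX_SHIFT 4
#define EU_MAX_MASK  (0xfu << EU_MAX_SHIFT)

static uint32_t pack_eu_fields(uint32_t min_eus, uint32_t max_eus)
{
        uint32_t rpcs = 0, val;

        val = min_eus << EU_MIN_SHIFT;
        assert(!(val & ~EU_MIN_MASK));  /* mirrors the GEM_BUG_ON() */
        rpcs |= val & EU_MIN_MASK;

        val = max_eus << EU_MAX_SHIFT;
        assert(!(val & ~EU_MAX_MASK));
        rpcs |= val & EU_MAX_MASK;

        return rpcs;
}

int main(void)
{
        return pack_eu_fields(8, 8) == 0x88 ? 0 : 1;
}
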
 
@@ -2538,12 +2674,16 @@ static void execlists_init_reg_state(u32 *regs,
                 * other PDP Descriptors are ignored.
                 */
                ASSIGN_CTX_PML4(ctx->ppgtt, regs);
+       } else {
+               ASSIGN_CTX_PDP(ctx->ppgtt, regs, 3);
+               ASSIGN_CTX_PDP(ctx->ppgtt, regs, 2);
+               ASSIGN_CTX_PDP(ctx->ppgtt, regs, 1);
+               ASSIGN_CTX_PDP(ctx->ppgtt, regs, 0);
        }
 
        if (rcs) {
                regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
-               CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
-                       make_rpcs(dev_priv));
+               CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 0);
 
                i915_oa_init_reg_state(engine, ctx, regs);
        }
@@ -2620,7 +2760,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 {
        struct drm_i915_gem_object *ctx_obj;
        struct i915_vma *vma;
-       uint32_t context_size;
+       u32 context_size;
        struct intel_ring *ring;
        struct i915_timeline *timeline;
        int ret;
@@ -2646,7 +2786,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
                goto error_deref_obj;
        }
 
-       timeline = i915_timeline_create(ctx->i915, ctx->name);
+       timeline = i915_timeline_create(ctx->i915, ctx->name, NULL);
        if (IS_ERR(timeline)) {
                ret = PTR_ERR(timeline);
                goto error_deref_obj;
@@ -2704,14 +2844,70 @@ void intel_lr_context_resume(struct drm_i915_private *i915)
 
                        intel_ring_reset(ce->ring, 0);
 
-                       if (ce->pin_count) { /* otherwise done in context_pin */
-                               u32 *regs = ce->lrc_reg_state;
+                       if (ce->pin_count) /* otherwise done in context_pin */
+                               __execlists_update_reg_state(engine, ce);
+               }
+       }
+}
 
-                               regs[CTX_RING_HEAD + 1] = ce->ring->head;
-                               regs[CTX_RING_TAIL + 1] = ce->ring->tail;
-                       }
+void intel_execlists_show_requests(struct intel_engine_cs *engine,
+                                  struct drm_printer *m,
+                                  void (*show_request)(struct drm_printer *m,
+                                                       struct i915_request *rq,
+                                                       const char *prefix),
+                                  unsigned int max)
+{
+       const struct intel_engine_execlists *execlists = &engine->execlists;
+       struct i915_request *rq, *last;
+       unsigned long flags;
+       unsigned int count;
+       struct rb_node *rb;
+
+       spin_lock_irqsave(&engine->timeline.lock, flags);
+
+       last = NULL;
+       count = 0;
+       list_for_each_entry(rq, &engine->timeline.requests, link) {
+               if (count++ < max - 1)
+                       show_request(m, rq, "\t\tE ");
+               else
+                       last = rq;
+       }
+       if (last) {
+               if (count > max) {
+                       drm_printf(m,
+                                  "\t\t...skipping %d executing requests...\n",
+                                  count - max);
+               }
+               show_request(m, last, "\t\tE ");
+       }
+
+       last = NULL;
+       count = 0;
+       if (execlists->queue_priority_hint != INT_MIN)
+               drm_printf(m, "\t\tQueue priority hint: %d\n",
+                          execlists->queue_priority_hint);
+       for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
+               struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
+               int i;
+
+               priolist_for_each_request(rq, p, i) {
+                       if (count++ < max - 1)
+                               show_request(m, rq, "\t\tQ ");
+                       else
+                               last = rq;
+               }
+       }
+       if (last) {
+               if (count > max) {
+                       drm_printf(m,
+                                  "\t\t...skipping %d queued requests...\n",
+                                  count - max);
                }
+               show_request(m, last, "\t\tQ ");
        }
+
+       spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
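
The dump caps each list at max entries and summarizes whatever it skipped, so a report from a wedged engine stays bounded. A hypothetical caller; the callback body and the printed fence fields are illustrative assumptions, not the actual i915 debugfs code:

static void show_request(struct drm_printer *m,
                         struct i915_request *rq,
                         const char *prefix)
{
        /* Illustrative: print the fence identity of the request. */
        drm_printf(m, "%s%llx:%llu\n", prefix,
                   (unsigned long long)rq->fence.context,
                   (unsigned long long)rq->fence.seqno);
}

static void dump_engine_requests(struct intel_engine_cs *engine,
                                 struct drm_printer *m)
{
        /* At most 8 executing and 8 queued requests, rest summarized. */
        intel_execlists_show_requests(engine, m, show_request, 8);
}
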
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
index f5a5502ecf70fa475f0e866acb393f6192892584..f1aec8a6986fd8ebb610a96dbd48c7bd082d7637 100644 (file)
@@ -97,11 +97,21 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine);
  */
 #define LRC_HEADER_PAGES LRC_PPHWSP_PN
 
+struct drm_printer;
+
 struct drm_i915_private;
 struct i915_gem_context;
 
 void intel_lr_context_resume(struct drm_i915_private *dev_priv);
-
 void intel_execlists_set_default_submission(struct intel_engine_cs *engine);
 
+void intel_execlists_show_requests(struct intel_engine_cs *engine,
+                                  struct drm_printer *m,
+                                  void (*show_request)(struct drm_printer *m,
+                                                       struct i915_request *rq,
+                                                       const char *prefix),
+                                  unsigned int max);
+
+u32 gen8_make_rpcs(struct drm_i915_private *i915, struct intel_sseu *ctx_sseu);
+
 #endif /* _INTEL_LRC_H_ */
index 96a8d9524b0c24445972e99c5a3bc958f379b739..322bdddda164db496a4190323004ebf2ed916e86 100644 (file)
@@ -288,12 +288,12 @@ static bool lspcon_parade_fw_ready(struct drm_dp_aux *aux)
 }
 
 static bool _lspcon_parade_write_infoframe_blocks(struct drm_dp_aux *aux,
-                                                 uint8_t *avi_buf)
+                                                 u8 *avi_buf)
 {
        u8 avi_if_ctrl;
        u8 block_count = 0;
        u8 *data;
-       uint16_t reg;
+       u16 reg;
        ssize_t ret;
 
        while (block_count < 4) {
@@ -335,10 +335,10 @@ static bool _lspcon_parade_write_infoframe_blocks(struct drm_dp_aux *aux,
 }
 
 static bool _lspcon_write_avi_infoframe_parade(struct drm_dp_aux *aux,
-                                              const uint8_t *frame,
+                                              const u8 *frame,
                                               ssize_t len)
 {
-       uint8_t avi_if[LSPCON_PARADE_AVI_IF_DATA_SIZE] = {1, };
+       u8 avi_if[LSPCON_PARADE_AVI_IF_DATA_SIZE] = {1, };
 
        /*
         * Parade's frames contain 32 bytes of data, divided
@@ -367,13 +367,13 @@ static bool _lspcon_write_avi_infoframe_parade(struct drm_dp_aux *aux,
 }
 
 static bool _lspcon_write_avi_infoframe_mca(struct drm_dp_aux *aux,
-                                           const uint8_t *buffer, ssize_t len)
+                                           const u8 *buffer, ssize_t len)
 {
        int ret;
-       uint32_t val = 0;
-       uint32_t retry;
-       uint16_t reg;
-       const uint8_t *data = buffer;
+       u32 val = 0;
+       u32 retry;
+       u16 reg;
+       const u8 *data = buffer;
 
        reg = LSPCON_MCA_AVI_IF_WRITE_OFFSET;
        while (val < len) {
@@ -459,13 +459,11 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
 {
        ssize_t ret;
        union hdmi_infoframe frame;
-       uint8_t buf[VIDEO_DIP_DATA_SIZE];
+       u8 buf[VIDEO_DIP_DATA_SIZE];
        struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
        struct intel_lspcon *lspcon = &dig_port->lspcon;
-       struct intel_dp *intel_dp = &dig_port->dp;
-       struct drm_connector *connector = &intel_dp->attached_connector->base;
-       const struct drm_display_mode *mode = &crtc_state->base.adjusted_mode;
-       bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported;
+       const struct drm_display_mode *adjusted_mode =
+               &crtc_state->base.adjusted_mode;
 
        if (!lspcon->active) {
                DRM_ERROR("Writing infoframes while LSPCON disabled ?\n");
@@ -473,7 +471,8 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
        }
 
        ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
-                                                      mode, is_hdmi2_sink);
+                                                      conn_state->connector,
+                                                      adjusted_mode);
        if (ret < 0) {
                DRM_ERROR("couldn't fill AVI infoframe\n");
                return;
@@ -488,11 +487,12 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
                frame.avi.colorspace = HDMI_COLORSPACE_RGB;
        }
 
-       drm_hdmi_avi_infoframe_quant_range(&frame.avi, mode,
+       drm_hdmi_avi_infoframe_quant_range(&frame.avi,
+                                          conn_state->connector,
+                                          adjusted_mode,
                                           crtc_state->limited_color_range ?
                                           HDMI_QUANTIZATION_RANGE_LIMITED :
-                                          HDMI_QUANTIZATION_RANGE_FULL,
-                                          false, is_hdmi2_sink);
+                                          HDMI_QUANTIZATION_RANGE_FULL);
 
        ret = hdmi_infoframe_pack(&frame, buf, sizeof(buf));
        if (ret < 0) {
index e6c5d985ea0afd9d6f8eadb45228dad3ee6323f0..b4aa49768e90fb27997692d38e36824ac81327aa 100644 (file)
@@ -32,7 +32,6 @@
 #include <linux/i2c.h>
 #include <linux/slab.h>
 #include <linux/vga_switcheroo.h>
-#include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
@@ -95,15 +94,17 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+       intel_wakeref_t wakeref;
        bool ret;
 
-       if (!intel_display_power_get_if_enabled(dev_priv,
-                                               encoder->power_domain))
+       wakeref = intel_display_power_get_if_enabled(dev_priv,
+                                                    encoder->power_domain);
+       if (!wakeref)
                return false;
 
        ret = intel_lvds_port_enabled(dev_priv, lvds_encoder->reg, pipe);
 
-       intel_display_power_put(dev_priv, encoder->power_domain);
+       intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
 
        return ret;
 }
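
intel_display_power_get_if_enabled() now returns an intel_wakeref_t cookie that the matching put consumes, which lets debug builds pair each release with its acquire. A simplified standalone sketch of the cookie idea, not the i915 implementation:

#include <assert.h>
#include <stdbool.h>

typedef unsigned long wakeref_t;

static unsigned long next_cookie = 1;
static int power_refs;

/* Returns a non-zero cookie only if the domain is already powered. */
static wakeref_t power_get_if_enabled(bool powered)
{
        if (!powered)
                return 0;
        power_refs++;
        return next_cookie++;
}

/* Consumes the cookie handed out by the matching get. */
static void power_put(wakeref_t wakeref)
{
        assert(wakeref);        /* a debug build could also match cookies */
        power_refs--;
}

int main(void)
{
        wakeref_t wf = power_get_if_enabled(true);

        if (wf)
                power_put(wf);
        return power_refs;
}
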
@@ -279,7 +280,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
         * special lvds dither control bit on pch-split platforms, dithering is
         * only controlled through the PIPECONF reg.
         */
-       if (IS_GEN4(dev_priv)) {
+       if (IS_GEN(dev_priv, 4)) {
                /*
                 * Bspec wording suggests that LVDS port dithering only exists
                 * for 18bpp panels.
@@ -379,9 +380,9 @@ intel_lvds_mode_valid(struct drm_connector *connector,
        return MODE_OK;
 }
 
-static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
-                                     struct intel_crtc_state *pipe_config,
-                                     struct drm_connector_state *conn_state)
+static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
+                                    struct intel_crtc_state *pipe_config,
+                                    struct drm_connector_state *conn_state)
 {
        struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
        struct intel_lvds_encoder *lvds_encoder =
@@ -395,7 +396,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
        /* Should never happen!! */
        if (INTEL_GEN(dev_priv) < 4 && intel_crtc->pipe == 0) {
                DRM_ERROR("Can't support LVDS on pipe A\n");
-               return false;
+               return -EINVAL;
        }
 
        if (lvds_encoder->a3_power == LVDS_A3_POWER_UP)
@@ -421,7 +422,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
                               adjusted_mode);
 
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
-               return false;
+               return -EINVAL;
 
        if (HAS_PCH_SPLIT(dev_priv)) {
                pipe_config->has_pch_encoder = true;
@@ -440,7 +441,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
         * user's requested refresh rate.
         */
 
-       return true;
+       return 0;
 }
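
The hook now returns 0 or a negative errno rather than a bool, so a precise failure can propagate to the caller. A sketch against the new signature; check_encoder_config() is hypothetical:

/* Hypothetical caller under the int-returning convention. */
static int check_encoder_config(struct intel_encoder *encoder,
                                struct intel_crtc_state *pipe_config,
                                struct drm_connector_state *conn_state)
{
        int ret;

        ret = encoder->compute_config(encoder, pipe_config, conn_state);
        if (ret)
                return ret;     /* e.g. -EINVAL propagates as-is */

        return 0;
}
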
 
 static enum drm_connector_status
@@ -797,26 +798,6 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
        return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
 }
 
-static bool intel_lvds_supported(struct drm_i915_private *dev_priv)
-{
-       /*
-        * With the introduction of the PCH we gained a dedicated
-        * LVDS presence pin, use it.
-        */
-       if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
-               return true;
-
-       /*
-        * Otherwise LVDS was only attached to mobile products,
-        * except for the inglorious 830gm
-        */
-       if (INTEL_GEN(dev_priv) <= 4 &&
-           IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
-               return true;
-
-       return false;
-}
-
 /**
  * intel_lvds_init - setup LVDS connectors on this device
  * @dev_priv: i915 device
@@ -841,9 +822,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
        u8 pin;
        u32 allowed_scalers;
 
-       if (!intel_lvds_supported(dev_priv))
-               return;
-
        /* Skip init on machines we know falsely report LVDS */
        if (dmi_check_system(intel_no_lvds)) {
                WARN(!dev_priv->vbt.int_lvds_support,
@@ -909,6 +887,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
        }
        intel_encoder->get_hw_state = intel_lvds_get_hw_state;
        intel_encoder->get_config = intel_lvds_get_config;
+       intel_encoder->update_pipe = intel_panel_update_backlight;
        intel_connector->get_hw_state = intel_connector_get_hw_state;
 
        intel_connector_attach_encoder(intel_connector, intel_encoder);
@@ -919,7 +898,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
        intel_encoder->cloneable = 0;
        if (HAS_PCH_SPLIT(dev_priv))
                intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
-       else if (IS_GEN4(dev_priv))
+       else if (IS_GEN(dev_priv, 4))
                intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
        else
                intel_encoder->crtc_mask = (1 << 1);
index 77e9871a8c9ac87ca4d2360262d7d815cd8270b0..331e7a678fb700ccd5ca4a07303382ccdf291086 100644 (file)
 struct drm_i915_mocs_entry {
        u32 control_value;
        u16 l3cc_value;
+       u16 used;
 };
 
 struct drm_i915_mocs_table {
-       u32 size;
+       unsigned int size;
+       unsigned int n_entries;
        const struct drm_i915_mocs_entry *table;
 };
 
 /* Defines for the tables (XXX_MOCS_0 - XXX_MOCS_63) */
-#define LE_CACHEABILITY(value) ((value) << 0)
-#define LE_TGT_CACHE(value)    ((value) << 2)
+#define _LE_CACHEABILITY(value)        ((value) << 0)
+#define _LE_TGT_CACHE(value)   ((value) << 2)
 #define LE_LRUM(value)         ((value) << 4)
 #define LE_AOM(value)          ((value) << 6)
 #define LE_RSC(value)          ((value) << 7)
 #define LE_SCC(value)          ((value) << 8)
 #define LE_PFM(value)          ((value) << 11)
 #define LE_SCF(value)          ((value) << 14)
+#define LE_COS(value)          ((value) << 15)
+#define LE_SSE(value)          ((value) << 17)
 
 /* Defines for the tables (LNCFMOCS0 - LNCFMOCS31) - two entries per word */
 #define L3_ESC(value)          ((value) << 0)
 #define L3_SCC(value)          ((value) << 1)
-#define L3_CACHEABILITY(value) ((value) << 4)
+#define _L3_CACHEABILITY(value)        ((value) << 4)
 
 /* Helper defines */
 #define GEN9_NUM_MOCS_ENTRIES  62  /* 62 out of 64 - 63 & 64 are reserved. */
+#define GEN11_NUM_MOCS_ENTRIES 64  /* 63-64 are reserved, but configured. */
 
 /* (e)LLC caching options */
-#define LE_PAGETABLE           0
-#define LE_UC                  1
-#define LE_WT                  2
-#define LE_WB                  3
-
-/* L3 caching options */
-#define L3_DIRECT              0
-#define L3_UC                  1
-#define L3_RESERVED            2
-#define L3_WB                  3
+#define LE_0_PAGETABLE         _LE_CACHEABILITY(0)
+#define LE_1_UC                        _LE_CACHEABILITY(1)
+#define LE_2_WT                        _LE_CACHEABILITY(2)
+#define LE_3_WB                        _LE_CACHEABILITY(3)
 
 /* Target cache */
-#define LE_TC_PAGETABLE                0
-#define LE_TC_LLC              1
-#define LE_TC_LLC_ELLC         2
-#define LE_TC_LLC_ELLC_ALT     3
+#define LE_TC_0_PAGETABLE      _LE_TGT_CACHE(0)
+#define LE_TC_1_LLC            _LE_TGT_CACHE(1)
+#define LE_TC_2_LLC_ELLC       _LE_TGT_CACHE(2)
+#define LE_TC_3_LLC_ELLC_ALT   _LE_TGT_CACHE(3)
+
+/* L3 caching options */
+#define L3_0_DIRECT            _L3_CACHEABILITY(0)
+#define L3_1_UC                        _L3_CACHEABILITY(1)
+#define L3_2_RESERVED          _L3_CACHEABILITY(2)
+#define L3_3_WB                        _L3_CACHEABILITY(3)
+
+#define MOCS_ENTRY(__idx, __control_value, __l3cc_value) \
+       [__idx] = { \
+               .control_value = __control_value, \
+               .l3cc_value = __l3cc_value, \
+               .used = 1, \
+       }
 
 /*
  * MOCS tables
@@ -80,85 +92,147 @@ struct drm_i915_mocs_table {
  * LNCFCMOCS0 - LNCFCMOCS32 registers.
  *
  * These tables are intended to be kept reasonably consistent across
- * platforms. However some of the fields are not applicable to all of
- * them.
+ * HW platforms, and for ICL+, be identical across OSes. To achieve
+ * that, for Icelake and above, the list of entries is published as
+ * part of the bspec.
  *
  * Entries not part of the following tables are undefined as far as
  * userspace is concerned and shouldn't be relied upon.  For the time
- * being they will be implicitly initialized to the strictest caching
- * configuration (uncached) to guarantee forwards compatibility with
- * userspace programs written against more recent kernels providing
- * additional MOCS entries.
+ * being they will be initialized to PTE.
  *
- * NOTE: These tables MUST start with being uncached and the length
- *       MUST be less than 63 as the last two registers are reserved
- *       by the hardware.  These tables are part of the kernel ABI and
- *       may only be updated incrementally by adding entries at the
- *       end.
+ * The last two entries are reserved by the hardware. For ICL+ they
+ * should be initialized according to the bspec and never used; for
+ * older platforms they should never be written to.
+ *
+ * NOTE: These tables are part of the bspec and defined as part of the
+ *       hardware interface for ICL+. For older platforms, they are part
+ *       of the kernel ABI. It is expected that, for any given hardware
+ *       platform, existing entries will remain constant and the table
+ *       will only be updated by adding new entries, filling unused
+ *       positions.
  */
+#define GEN9_MOCS_ENTRIES \
+       MOCS_ENTRY(I915_MOCS_UNCACHED, \
+                  LE_1_UC | LE_TC_2_LLC_ELLC, \
+                  L3_1_UC), \
+       MOCS_ENTRY(I915_MOCS_PTE, \
+                  LE_0_PAGETABLE | LE_TC_2_LLC_ELLC | LE_LRUM(3), \
+                  L3_3_WB)
+
 static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
-       [I915_MOCS_UNCACHED] = {
-         /* 0x00000009 */
-         .control_value = LE_CACHEABILITY(LE_UC) |
-                          LE_TGT_CACHE(LE_TC_LLC_ELLC) |
-                          LE_LRUM(0) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
-                          LE_PFM(0) | LE_SCF(0),
-
-         /* 0x0010 */
-         .l3cc_value =    L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC),
-       },
-       [I915_MOCS_PTE] = {
-         /* 0x00000038 */
-         .control_value = LE_CACHEABILITY(LE_PAGETABLE) |
-                          LE_TGT_CACHE(LE_TC_LLC_ELLC) |
-                          LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
-                          LE_PFM(0) | LE_SCF(0),
-         /* 0x0030 */
-         .l3cc_value =    L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
-       },
-       [I915_MOCS_CACHED] = {
-         /* 0x0000003b */
-         .control_value = LE_CACHEABILITY(LE_WB) |
-                          LE_TGT_CACHE(LE_TC_LLC_ELLC) |
-                          LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
-                          LE_PFM(0) | LE_SCF(0),
-         /* 0x0030 */
-         .l3cc_value =   L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
-       },
+       GEN9_MOCS_ENTRIES,
+       MOCS_ENTRY(I915_MOCS_CACHED,
+                  LE_3_WB | LE_TC_2_LLC_ELLC | LE_LRUM(3),
+                  L3_3_WB)
 };
 
 /* NOTE: the LE_TGT_CACHE is not used on Broxton */
 static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
-       [I915_MOCS_UNCACHED] = {
-         /* 0x00000009 */
-         .control_value = LE_CACHEABILITY(LE_UC) |
-                          LE_TGT_CACHE(LE_TC_LLC_ELLC) |
-                          LE_LRUM(0) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
-                          LE_PFM(0) | LE_SCF(0),
-
-         /* 0x0010 */
-         .l3cc_value =    L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC),
-       },
-       [I915_MOCS_PTE] = {
-         /* 0x00000038 */
-         .control_value = LE_CACHEABILITY(LE_PAGETABLE) |
-                          LE_TGT_CACHE(LE_TC_LLC_ELLC) |
-                          LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
-                          LE_PFM(0) | LE_SCF(0),
-
-         /* 0x0030 */
-         .l3cc_value =    L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
-       },
-       [I915_MOCS_CACHED] = {
-         /* 0x00000039 */
-         .control_value = LE_CACHEABILITY(LE_UC) |
-                          LE_TGT_CACHE(LE_TC_LLC_ELLC) |
-                          LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
-                          LE_PFM(0) | LE_SCF(0),
-
-         /* 0x0030 */
-         .l3cc_value =    L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
-       },
+       GEN9_MOCS_ENTRIES,
+       MOCS_ENTRY(I915_MOCS_CACHED,
+                  LE_1_UC | LE_TC_2_LLC_ELLC | LE_LRUM(3),
+                  L3_3_WB)
+};
+
+#define GEN11_MOCS_ENTRIES \
+       /* Base - Uncached (Deprecated) */ \
+       MOCS_ENTRY(I915_MOCS_UNCACHED, \
+                  LE_1_UC | LE_TC_1_LLC, \
+                  L3_1_UC), \
+       /* Base - L3 + LeCC:PAT (Deprecated) */ \
+       MOCS_ENTRY(I915_MOCS_PTE, \
+                  LE_0_PAGETABLE | LE_TC_1_LLC, \
+                  L3_3_WB), \
+       /* Base - L3 + LLC */ \
+       MOCS_ENTRY(2, \
+                  LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \
+                  L3_3_WB), \
+       /* Base - Uncached */ \
+       MOCS_ENTRY(3, \
+                  LE_1_UC | LE_TC_1_LLC, \
+                  L3_1_UC), \
+       /* Base - L3 */ \
+       MOCS_ENTRY(4, \
+                  LE_1_UC | LE_TC_1_LLC, \
+                  L3_3_WB), \
+       /* Base - LLC */ \
+       MOCS_ENTRY(5, \
+                  LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \
+                  L3_1_UC), \
+       /* Age 0 - LLC */ \
+       MOCS_ENTRY(6, \
+                  LE_3_WB | LE_TC_1_LLC | LE_LRUM(1), \
+                  L3_1_UC), \
+       /* Age 0 - L3 + LLC */ \
+       MOCS_ENTRY(7, \
+                  LE_3_WB | LE_TC_1_LLC | LE_LRUM(1), \
+                  L3_3_WB), \
+       /* Age: Don't Chg. - LLC */ \
+       MOCS_ENTRY(8, \
+                  LE_3_WB | LE_TC_1_LLC | LE_LRUM(2), \
+                  L3_1_UC), \
+       /* Age: Don't Chg. - L3 + LLC */ \
+       MOCS_ENTRY(9, \
+                  LE_3_WB | LE_TC_1_LLC | LE_LRUM(2), \
+                  L3_3_WB), \
+       /* No AOM - LLC */ \
+       MOCS_ENTRY(10, \
+                  LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_AOM(1), \
+                  L3_1_UC), \
+       /* No AOM - L3 + LLC */ \
+       MOCS_ENTRY(11, \
+                  LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_AOM(1), \
+                  L3_3_WB), \
+       /* No AOM; Age 0 - LLC */ \
+       MOCS_ENTRY(12, \
+                  LE_3_WB | LE_TC_1_LLC | LE_LRUM(1) | LE_AOM(1), \
+                  L3_1_UC), \
+       /* No AOM; Age 0 - L3 + LLC */ \
+       MOCS_ENTRY(13, \
+                  LE_3_WB | LE_TC_1_LLC | LE_LRUM(1) | LE_AOM(1), \
+                  L3_3_WB), \
+       /* No AOM; Age:DC - LLC */ \
+       MOCS_ENTRY(14, \
+                  LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1), \
+                  L3_1_UC), \
+       /* No AOM; Age:DC - L3 + LLC */ \
+       MOCS_ENTRY(15, \
+                  LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1), \
+                  L3_3_WB), \
+       /* Self-Snoop - L3 + LLC */ \
+       MOCS_ENTRY(18, \
+                  LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SSE(3), \
+                  L3_3_WB), \
+       /* Skip Caching - L3 + LLC(12.5%) */ \
+       MOCS_ENTRY(19, \
+                  LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SCC(7), \
+                  L3_3_WB), \
+       /* Skip Caching - L3 + LLC(25%) */ \
+       MOCS_ENTRY(20, \
+                  LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SCC(3), \
+                  L3_3_WB), \
+       /* Skip Caching - L3 + LLC(50%) */ \
+       MOCS_ENTRY(21, \
+                  LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SCC(1), \
+                  L3_3_WB), \
+       /* Skip Caching - L3 + LLC(75%) */ \
+       MOCS_ENTRY(22, \
+                  LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_RSC(1) | LE_SCC(3), \
+                  L3_3_WB), \
+       /* Skip Caching - L3 + LLC(87.5%) */ \
+       MOCS_ENTRY(23, \
+                  LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_RSC(1) | LE_SCC(7), \
+                  L3_3_WB), \
+       /* HW Reserved - SW program but never use */ \
+       MOCS_ENTRY(62, \
+                  LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \
+                  L3_1_UC), \
+       /* HW Reserved - SW program but never use */ \
+       MOCS_ENTRY(63, \
+                  LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \
+                  L3_1_UC)
+
+static const struct drm_i915_mocs_entry icelake_mocs_table[] = {
+       GEN11_MOCS_ENTRIES
 };
 
 /**
@@ -178,13 +252,19 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
 {
        bool result = false;
 
-       if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv) ||
-           IS_ICELAKE(dev_priv)) {
+       if (IS_ICELAKE(dev_priv)) {
+               table->size  = ARRAY_SIZE(icelake_mocs_table);
+               table->table = icelake_mocs_table;
+               table->n_entries = GEN11_NUM_MOCS_ENTRIES;
+               result = true;
+       } else if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
                table->size  = ARRAY_SIZE(skylake_mocs_table);
+               table->n_entries = GEN9_NUM_MOCS_ENTRIES;
                table->table = skylake_mocs_table;
                result = true;
        } else if (IS_GEN9_LP(dev_priv)) {
                table->size  = ARRAY_SIZE(broxton_mocs_table);
+               table->n_entries = GEN9_NUM_MOCS_ENTRIES;
                table->table = broxton_mocs_table;
                result = true;
        } else {
@@ -193,7 +273,7 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
        }
 
        /* WaDisableSkipCaching:skl,bxt,kbl,glk */
-       if (IS_GEN9(dev_priv)) {
+       if (IS_GEN(dev_priv, 9)) {
                int i;
 
                for (i = 0; i < table->size; i++)
@@ -226,6 +306,19 @@ static i915_reg_t mocs_register(enum intel_engine_id engine_id, int index)
        }
 }
 
+/*
+ * Get control_value from a MOCS entry. If the entry is not used, the
+ * value of the I915_MOCS_PTE entry is returned instead.
+ */
+static u32 get_entry_control(const struct drm_i915_mocs_table *table,
+                            unsigned int index)
+{
+       if (table->table[index].used)
+               return table->table[index].control_value;
+
+       return table->table[I915_MOCS_PTE].control_value;
+}
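
With the used flag, a table can be sparse: any index without an explicit MOCS_ENTRY() falls back to the PTE entry's values. A standalone sketch of the lookup; the control values are illustrative, and I915_MOCS_PTE is assumed to be index 1 as in the uapi header:

#include <stdint.h>
#include <stdio.h>

#define MOCS_PTE 1      /* fallback entry index, as I915_MOCS_PTE */

struct entry {
        uint32_t control_value;
        uint16_t used;
};

/* Sparse table: only indices 0, 1 and 4 are defined; 2-3 stay zeroed. */
static const struct entry table[] = {
        [0]        = { .control_value = 0x9,  .used = 1 },
        [MOCS_PTE] = { .control_value = 0x38, .used = 1 },
        [4]        = { .control_value = 0x3b, .used = 1 },
};

/* Unused slots decay to the PTE entry, as get_entry_control() does. */
static uint32_t get_control(unsigned int index)
{
        if (table[index].used)
                return table[index].control_value;

        return table[MOCS_PTE].control_value;
}

int main(void)
{
        printf("entry 2 -> %#x\n", get_control(2)); /* prints 0x38 */
        return 0;
}
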
+
 /**
  * intel_mocs_init_engine() - emit the mocs control table
  * @engine:    The engine for whom to emit the registers.
@@ -238,27 +331,23 @@ void intel_mocs_init_engine(struct intel_engine_cs *engine)
        struct drm_i915_private *dev_priv = engine->i915;
        struct drm_i915_mocs_table table;
        unsigned int index;
+       u32 unused_value;
 
        if (!get_mocs_settings(dev_priv, &table))
                return;
 
-       GEM_BUG_ON(table.size > GEN9_NUM_MOCS_ENTRIES);
-
-       for (index = 0; index < table.size; index++)
-               I915_WRITE(mocs_register(engine->id, index),
-                          table.table[index].control_value);
-
-       /*
-        * Ok, now set the unused entries to uncached. These entries
-        * are officially undefined and no contract for the contents
-        * and settings is given for these entries.
-        *
-        * Entry 0 in the table is uncached - so we are just writing
-        * that value to all the used entries.
-        */
-       for (; index < GEN9_NUM_MOCS_ENTRIES; index++)
-               I915_WRITE(mocs_register(engine->id, index),
-                          table.table[0].control_value);
+       /* Set unused values to PTE */
+       unused_value = table.table[I915_MOCS_PTE].control_value;
+
+       for (index = 0; index < table.size; index++) {
+               u32 value = get_entry_control(&table, index);
+
+               I915_WRITE(mocs_register(engine->id, index), value);
+       }
+
+       /* All remaining entries are also unused */
+       for (; index < table.n_entries; index++)
+               I915_WRITE(mocs_register(engine->id, index), unused_value);
 }
 
 /**
@@ -276,33 +365,32 @@ static int emit_mocs_control_table(struct i915_request *rq,
 {
        enum intel_engine_id engine = rq->engine->id;
        unsigned int index;
+       u32 unused_value;
        u32 *cs;
 
-       if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
+       if (GEM_WARN_ON(table->size > table->n_entries))
                return -ENODEV;
 
-       cs = intel_ring_begin(rq, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
+       /* Set unused values to PTE */
+       unused_value = table->table[I915_MOCS_PTE].control_value;
+
+       cs = intel_ring_begin(rq, 2 + 2 * table->n_entries);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
-       *cs++ = MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES);
+       *cs++ = MI_LOAD_REGISTER_IMM(table->n_entries);
 
        for (index = 0; index < table->size; index++) {
+               u32 value = get_entry_control(table, index);
+
                *cs++ = i915_mmio_reg_offset(mocs_register(engine, index));
-               *cs++ = table->table[index].control_value;
+               *cs++ = value;
        }
 
-       /*
-        * Ok, now set the unused entries to uncached. These entries
-        * are officially undefined and no contract for the contents
-        * and settings is given for these entries.
-        *
-        * Entry 0 in the table is uncached - so we are just writing
-        * that value to all the used entries.
-        */
-       for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
+       /* All remaining entries are also unused */
+       for (; index < table->n_entries; index++) {
                *cs++ = i915_mmio_reg_offset(mocs_register(engine, index));
-               *cs++ = table->table[0].control_value;
+               *cs++ = unused_value;
        }
 
        *cs++ = MI_NOOP;
@@ -311,12 +399,24 @@ static int emit_mocs_control_table(struct i915_request *rq,
        return 0;
 }
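
The batch above is one MI_LOAD_REGISTER_IMM packet: a header encoding the register count, then (offset, value) pairs, closed with an MI_NOOP so the emission stays QWORD-aligned. A simplified standalone sketch of that layout; the opcode encoding and register offsets are illustrative, the real macros live in i915_reg.h:

#include <stdint.h>
#include <stdio.h>

#define MI_NOOP                 0u
/* Illustrative encoding of MI_LOAD_REGISTER_IMM(x). */
#define MI_LOAD_REGISTER_IMM(x) (0x22u << 23 | (2 * (x) - 1))

/* Emit count (offset, value) pairs as one LRI packet into cs. */
static uint32_t *emit_lri(uint32_t *cs, const uint32_t *offsets,
                          const uint32_t *values, unsigned int count)
{
        unsigned int i;

        *cs++ = MI_LOAD_REGISTER_IMM(count);
        for (i = 0; i < count; i++) {
                *cs++ = offsets[i];
                *cs++ = values[i];
        }
        *cs++ = MI_NOOP;        /* header + 2n + 1 keeps the total even */

        return cs;
}

int main(void)
{
        uint32_t buf[8], *end;
        const uint32_t offsets[] = { 0xc800, 0xc804 };  /* illustrative */
        const uint32_t values[]  = { 0x0038, 0x003b };

        end = emit_lri(buf, offsets, values, 2);
        printf("emitted %td dwords\n", end - buf);      /* 6 */
        return 0;
}
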
 
+/*
+ * Get l3cc_value from a MOCS entry. If the entry is not used, the
+ * value of the I915_MOCS_PTE entry is returned instead.
+ */
+static u16 get_entry_l3cc(const struct drm_i915_mocs_table *table,
+                         unsigned int index)
+{
+       if (table->table[index].used)
+               return table->table[index].l3cc_value;
+
+       return table->table[I915_MOCS_PTE].l3cc_value;
+}
+
 static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
                               u16 low,
                               u16 high)
 {
-       return table->table[low].l3cc_value |
-              table->table[high].l3cc_value << 16;
+       return low | high << 16;
 }
 
 /**
@@ -333,38 +433,43 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
 static int emit_mocs_l3cc_table(struct i915_request *rq,
                                const struct drm_i915_mocs_table *table)
 {
+       u16 unused_value;
        unsigned int i;
        u32 *cs;
 
-       if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
+       if (GEM_WARN_ON(table->size > table->n_entries))
                return -ENODEV;
 
-       cs = intel_ring_begin(rq, 2 + GEN9_NUM_MOCS_ENTRIES);
+       /* Set unused values to PTE */
+       unused_value = table->table[I915_MOCS_PTE].l3cc_value;
+
+       cs = intel_ring_begin(rq, 2 + table->n_entries);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
-       *cs++ = MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2);
+       *cs++ = MI_LOAD_REGISTER_IMM(table->n_entries / 2);
+
+       for (i = 0; i < table->size / 2; i++) {
+               u16 low = get_entry_l3cc(table, 2 * i);
+               u16 high = get_entry_l3cc(table, 2 * i + 1);
 
-       for (i = 0; i < table->size/2; i++) {
                *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
-               *cs++ = l3cc_combine(table, 2 * i, 2 * i + 1);
+               *cs++ = l3cc_combine(table, low, high);
        }
 
+       /* Odd table size - 1 left over */
        if (table->size & 0x01) {
-               /* Odd table size - 1 left over */
+               u16 low = get_entry_l3cc(table, 2 * i);
+
                *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
-               *cs++ = l3cc_combine(table, 2 * i, 0);
+               *cs++ = l3cc_combine(table, low, unused_value);
                i++;
        }
 
-       /*
-        * Now set the rest of the table to uncached - use entry 0 as
-        * this will be uncached. Leave the last pair uninitialised as
-        * they are reserved by the hardware.
-        */
-       for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
+       /* All remaining entries are also unused */
+       for (; i < table->n_entries / 2; i++) {
                *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
-               *cs++ = l3cc_combine(table, 0, 0);
+               *cs++ = l3cc_combine(table, unused_value, unused_value);
        }
 
        *cs++ = MI_NOOP;
@@ -391,26 +496,35 @@ void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv)
 {
        struct drm_i915_mocs_table table;
        unsigned int i;
+       u16 unused_value;
 
        if (!get_mocs_settings(dev_priv, &table))
                return;
 
-       for (i = 0; i < table.size/2; i++)
-               I915_WRITE(GEN9_LNCFCMOCS(i), l3cc_combine(&table, 2*i, 2*i+1));
+       /* Set unused values to PTE */
+       unused_value = table.table[I915_MOCS_PTE].l3cc_value;
+
+       for (i = 0; i < table.size / 2; i++) {
+               u16 low = get_entry_l3cc(&table, 2 * i);
+               u16 high = get_entry_l3cc(&table, 2 * i + 1);
+
+               I915_WRITE(GEN9_LNCFCMOCS(i),
+                          l3cc_combine(&table, low, high));
+       }
 
        /* Odd table size - 1 left over */
        if (table.size & 0x01) {
-               I915_WRITE(GEN9_LNCFCMOCS(i), l3cc_combine(&table, 2*i, 0));
+               u16 low = get_entry_l3cc(&table, 2 * i);
+
+               I915_WRITE(GEN9_LNCFCMOCS(i),
+                          l3cc_combine(&table, low, unused_value));
                i++;
        }
 
-       /*
-        * Now set the rest of the table to uncached - use entry 0 as
-        * this will be uncached. Leave the last pair as initialised as
-        * they are reserved by the hardware.
-        */
-       for (; i < (GEN9_NUM_MOCS_ENTRIES / 2); i++)
-               I915_WRITE(GEN9_LNCFCMOCS(i), l3cc_combine(&table, 0, 0));
+       /* All remaining entries are also unused */
+       for (; i < table.n_entries / 2; i++)
+               I915_WRITE(GEN9_LNCFCMOCS(i),
+                          l3cc_combine(&table, unused_value, unused_value));
 }
 
 /**
index d89080d75b80ddfc21285e5ad2def13899430937..3d99d1271b2bc515a0468fc9445a3e98c8f7f919 100644 (file)
@@ -49,7 +49,6 @@
  * context handling keep the MOCS in step.
  */
 
-#include <drm/drmP.h>
 #include "i915_drv.h"
 
 int intel_rcs_context_init_mocs(struct i915_request *rq);
index b8f106d9ecf8b1be6286b2dfaeac1c5188629528..30ae96c5c97cb1caf4911dc85d0fff11dffba92c 100644 (file)
@@ -30,7 +30,6 @@
 #include <linux/firmware.h>
 #include <acpi/video.h>
 
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 
 #include "intel_opregion.h"
index 20ea7c99d13a06e73a52b8d5b69866b8f35a32ad..c0df1dbb0069e805face7c130cd9866791e2b77f 100644 (file)
@@ -25,8 +25,9 @@
  *
  * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
  */
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
+#include <drm/drm_fourcc.h>
+
 #include "i915_drv.h"
 #include "i915_reg.h"
 #include "intel_drv.h"
@@ -185,7 +186,7 @@ struct intel_overlay {
        struct overlay_registers __iomem *regs;
        u32 flip_addr;
        /* flip handling */
-       struct i915_gem_active last_flip;
+       struct i915_active_request last_flip;
 };
 
 static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
@@ -213,23 +214,23 @@ static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
 
 static void intel_overlay_submit_request(struct intel_overlay *overlay,
                                         struct i915_request *rq,
-                                        i915_gem_retire_fn retire)
+                                        i915_active_retire_fn retire)
 {
-       GEM_BUG_ON(i915_gem_active_peek(&overlay->last_flip,
-                                       &overlay->i915->drm.struct_mutex));
-       i915_gem_active_set_retire_fn(&overlay->last_flip, retire,
-                                     &overlay->i915->drm.struct_mutex);
-       i915_gem_active_set(&overlay->last_flip, rq);
+       GEM_BUG_ON(i915_active_request_peek(&overlay->last_flip,
+                                           &overlay->i915->drm.struct_mutex));
+       i915_active_request_set_retire_fn(&overlay->last_flip, retire,
+                                         &overlay->i915->drm.struct_mutex);
+       __i915_active_request_set(&overlay->last_flip, rq);
        i915_request_add(rq);
 }
 
 static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
                                         struct i915_request *rq,
-                                        i915_gem_retire_fn retire)
+                                        i915_active_retire_fn retire)
 {
        intel_overlay_submit_request(overlay, rq, retire);
-       return i915_gem_active_retire(&overlay->last_flip,
-                                     &overlay->i915->drm.struct_mutex);
+       return i915_active_request_retire(&overlay->last_flip,
+                                         &overlay->i915->drm.struct_mutex);
 }
 
 static struct i915_request *alloc_request(struct intel_overlay *overlay)
@@ -350,8 +351,9 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
        i915_vma_put(vma);
 }
 
-static void intel_overlay_release_old_vid_tail(struct i915_gem_active *active,
-                                              struct i915_request *rq)
+static void
+intel_overlay_release_old_vid_tail(struct i915_active_request *active,
+                                  struct i915_request *rq)
 {
        struct intel_overlay *overlay =
                container_of(active, typeof(*overlay), last_flip);
@@ -359,7 +361,7 @@ static void intel_overlay_release_old_vid_tail(struct i915_gem_active *active,
        intel_overlay_release_old_vma(overlay);
 }
 
-static void intel_overlay_off_tail(struct i915_gem_active *active,
+static void intel_overlay_off_tail(struct i915_active_request *active,
                                   struct i915_request *rq)
 {
        struct intel_overlay *overlay =
@@ -422,8 +424,8 @@ static int intel_overlay_off(struct intel_overlay *overlay)
  * We have to be careful not to repeat work forever and make forward progress. */
 static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
 {
-       return i915_gem_active_retire(&overlay->last_flip,
-                                     &overlay->i915->drm.struct_mutex);
+       return i915_active_request_retire(&overlay->last_flip,
+                                         &overlay->i915->drm.struct_mutex);
 }
 
 /* Wait for pending overlay flip and release old frame.
@@ -479,8 +481,6 @@ void intel_overlay_reset(struct drm_i915_private *dev_priv)
        if (!overlay)
                return;
 
-       intel_overlay_release_old_vid(overlay);
-
        overlay->old_xscale = 0;
        overlay->old_yscale = 0;
        overlay->crtc = NULL;
@@ -541,7 +541,7 @@ static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 widt
 {
        u32 sw;
 
-       if (IS_GEN2(dev_priv))
+       if (IS_GEN(dev_priv, 2))
                sw = ALIGN((offset & 31) + width, 32);
        else
                sw = ALIGN((offset & 63) + width, 64);
@@ -778,7 +778,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
                u32 oconfig;
 
                oconfig = OCONF_CC_OUT_8BIT;
-               if (IS_GEN4(dev_priv))
+               if (IS_GEN(dev_priv, 4))
                        oconfig |= OCONF_CSC_MODE_BT709;
                oconfig |= pipe == 0 ?
                        OCONF_PIPE_A : OCONF_PIPE_B;
@@ -1012,7 +1012,7 @@ static int check_overlay_src(struct drm_i915_private *dev_priv,
 
        if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
                return -EINVAL;
-       if (IS_GEN4(dev_priv) && rec->stride_Y < 512)
+       if (IS_GEN(dev_priv, 4) && rec->stride_Y < 512)
                return -EINVAL;
 
        tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
@@ -1246,7 +1246,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
                attrs->contrast   = overlay->contrast;
                attrs->saturation = overlay->saturation;
 
-               if (!IS_GEN2(dev_priv)) {
+               if (!IS_GEN(dev_priv, 2)) {
                        attrs->gamma0 = I915_READ(OGAMC0);
                        attrs->gamma1 = I915_READ(OGAMC1);
                        attrs->gamma2 = I915_READ(OGAMC2);
@@ -1270,7 +1270,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
                update_reg_attrs(overlay, overlay->regs);
 
                if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
-                       if (IS_GEN2(dev_priv))
+                       if (IS_GEN(dev_priv, 2))
                                goto out_unlock;
 
                        if (overlay->active) {
@@ -1358,7 +1358,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
        overlay->contrast = 75;
        overlay->saturation = 146;
 
-       init_request_active(&overlay->last_flip, NULL);
+       INIT_ACTIVE_REQUEST(&overlay->last_flip);
 
        mutex_lock(&dev_priv->drm.struct_mutex);
 
index e6cd7b55c0182425cb7eb3bc32c9250d8cf7f601..beca98d2b035b2a84f3425a674cfbb4a4966aa13 100644 (file)
@@ -563,7 +563,7 @@ static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32
                pci_write_config_byte(dev_priv->drm.pdev, LBPC, lbpc);
        }
 
-       if (IS_GEN4(dev_priv)) {
+       if (IS_GEN(dev_priv, 4)) {
                mask = BACKLIGHT_DUTY_CYCLE_MASK;
        } else {
                level <<= 1;
@@ -929,7 +929,7 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
         * 855gm only, but checking for gen2 is safe, as 855gm is the only gen2
         * that has backlight.
         */
-       if (IS_GEN2(dev_priv))
+       if (IS_GEN(dev_priv, 2))
                I915_WRITE(BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
 }
 
@@ -1087,20 +1087,11 @@ static void pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
        intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
 }
 
-void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
-                                 const struct drm_connector_state *conn_state)
+static void __intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
+                                          const struct drm_connector_state *conn_state)
 {
        struct intel_connector *connector = to_intel_connector(conn_state->connector);
-       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct intel_panel *panel = &connector->panel;
-       enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
-
-       if (!panel->backlight.present)
-               return;
-
-       DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
-
-       mutex_lock(&dev_priv->backlight_lock);
 
        WARN_ON(panel->backlight.max == 0);
 
@@ -1117,6 +1108,24 @@ void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
        panel->backlight.enabled = true;
        if (panel->backlight.device)
                panel->backlight.device->props.power = FB_BLANK_UNBLANK;
+}
+
+void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
+                                 const struct drm_connector_state *conn_state)
+{
+       struct intel_connector *connector = to_intel_connector(conn_state->connector);
+       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+       struct intel_panel *panel = &connector->panel;
+       enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
+
+       if (!panel->backlight.present)
+               return;
+
+       DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
+
+       mutex_lock(&dev_priv->backlight_lock);
+
+       __intel_panel_enable_backlight(crtc_state, conn_state);
 
        mutex_unlock(&dev_priv->backlight_lock);
 }
@@ -1203,17 +1212,20 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd)
        struct intel_connector *connector = bl_get_data(bd);
        struct drm_device *dev = connector->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
-       u32 hw_level;
-       int ret;
+       intel_wakeref_t wakeref;
+       int ret = 0;
 
-       intel_runtime_pm_get(dev_priv);
-       drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+       with_intel_runtime_pm(dev_priv, wakeref) {
+               u32 hw_level;
 
-       hw_level = intel_panel_get_backlight(connector);
-       ret = scale_hw_to_user(connector, hw_level, bd->props.max_brightness);
+               drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
 
-       drm_modeset_unlock(&dev->mode_config.connection_mutex);
-       intel_runtime_pm_put(dev_priv);
+               hw_level = intel_panel_get_backlight(connector);
+               ret = scale_hw_to_user(connector,
+                                      hw_level, bd->props.max_brightness);
+
+               drm_modeset_unlock(&dev->mode_config.connection_mutex);
+       }
 
        return ret;
 }
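
with_intel_runtime_pm() is a scoped helper: acquire in the for-initializer, run the body once, release in the increment clause. A simplified standalone sketch of the idiom, not the driver's actual macro; note a bare break inside the body would skip the put:

#include <stdio.h>

typedef unsigned long wakeref_t;

static wakeref_t runtime_pm_get(void)   { puts("get"); return 1; }
static void runtime_pm_put(wakeref_t w) { (void)w; puts("put"); }

/* for-loop trick: runs the body exactly once, releasing on exit. */
#define with_runtime_pm(wf) \
        for ((wf) = runtime_pm_get(); (wf); runtime_pm_put(wf), (wf) = 0)

int main(void)
{
        wakeref_t wakeref;

        with_runtime_pm(wakeref)
                puts("do work while awake");
        return 0;
}
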
@@ -1484,8 +1496,8 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
 {
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct intel_panel *panel = &connector->panel;
-       u32 pch_ctl1, pch_ctl2, val;
-       bool alt;
+       u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
+       bool alt, cpu_mode;
 
        if (HAS_PCH_LPT(dev_priv))
                alt = I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
@@ -1499,6 +1511,8 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
        pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
        panel->backlight.max = pch_ctl2 >> 16;
 
+       cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
+
        if (!panel->backlight.max)
                panel->backlight.max = get_backlight_max_vbt(connector);
 
@@ -1507,12 +1521,28 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
 
        panel->backlight.min = get_backlight_min_vbt(connector);
 
-       val = lpt_get_backlight(connector);
+       panel->backlight.enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
+
+       cpu_mode = panel->backlight.enabled && HAS_PCH_LPT(dev_priv) &&
+                  !(pch_ctl1 & BLM_PCH_OVERRIDE_ENABLE) &&
+                  (cpu_ctl2 & BLM_PWM_ENABLE);
+       if (cpu_mode)
+               val = pch_get_backlight(connector);
+       else
+               val = lpt_get_backlight(connector);
        val = intel_panel_compute_brightness(connector, val);
        panel->backlight.level = clamp(val, panel->backlight.min,
                                       panel->backlight.max);
 
-       panel->backlight.enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
+       if (cpu_mode) {
+               DRM_DEBUG_KMS("CPU backlight register was enabled, switching to PCH override\n");
+
+               /* Write converted CPU PWM value to PCH override register */
+               lpt_set_backlight(connector->base.state, panel->backlight.level);
+               I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
+
+               I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2 & ~BLM_PWM_ENABLE);
+       }
 
        return 0;
 }
@@ -1557,7 +1587,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu
 
        ctl = I915_READ(BLC_PWM_CTL);
 
-       if (IS_GEN2(dev_priv) || IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
+       if (IS_GEN(dev_priv, 2) || IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
                panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
 
        if (IS_PINEVIEW(dev_priv))
@@ -1773,6 +1803,24 @@ static int pwm_setup_backlight(struct intel_connector *connector,
        return 0;
 }
 
+void intel_panel_update_backlight(struct intel_encoder *encoder,
+                                 const struct intel_crtc_state *crtc_state,
+                                 const struct drm_connector_state *conn_state)
+{
+       struct intel_connector *connector = to_intel_connector(conn_state->connector);
+       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+       struct intel_panel *panel = &connector->panel;
+
+       if (!panel->backlight.present)
+               return;
+
+       mutex_lock(&dev_priv->backlight_lock);
+       if (!panel->backlight.enabled)
+               __intel_panel_enable_backlight(crtc_state, conn_state);
+
+       mutex_unlock(&dev_priv->backlight_lock);
+}
+
 int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
 {
        struct drm_i915_private *dev_priv = to_i915(connector->dev);
@@ -1886,7 +1934,7 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
                        panel->backlight.get = vlv_get_backlight;
                        panel->backlight.hz_to_pwm = vlv_hz_to_pwm;
                }
-       } else if (IS_GEN4(dev_priv)) {
+       } else if (IS_GEN(dev_priv, 4)) {
                panel->backlight.setup = i965_setup_backlight;
                panel->backlight.enable = i965_enable_backlight;
                panel->backlight.disable = i965_disable_backlight;
index f3c9010e332a0eab4a7ca440e547307431dc0bb5..a8554dc4f196fb7a626173a4e8714e4ca4fe78f3 100644 (file)
@@ -44,7 +44,7 @@ static const char * const pipe_crc_sources[] = {
 };
 
 static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
-                                uint32_t *val)
+                                u32 *val)
 {
        if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
                *source = INTEL_PIPE_CRC_SOURCE_PIPE;
@@ -120,7 +120,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
 static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
                                enum pipe pipe,
                                enum intel_pipe_crc_source *source,
-                               uint32_t *val)
+                               u32 *val)
 {
        bool need_stable_symbols = false;
 
@@ -165,7 +165,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
         *   - DisplayPort scrambling: used for EMI reduction
         */
        if (need_stable_symbols) {
-               uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+               u32 tmp = I915_READ(PORT_DFT2_G4X);
 
                tmp |= DC_BALANCE_RESET_VLV;
                switch (pipe) {
@@ -190,7 +190,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
 static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
                                 enum pipe pipe,
                                 enum intel_pipe_crc_source *source,
-                                uint32_t *val)
+                                u32 *val)
 {
        bool need_stable_symbols = false;
 
@@ -244,7 +244,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
         *   - DisplayPort scrambling: used for EMI reduction
         */
        if (need_stable_symbols) {
-               uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+               u32 tmp = I915_READ(PORT_DFT2_G4X);
 
                WARN_ON(!IS_G4X(dev_priv));
 
@@ -265,7 +265,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
 static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
                                         enum pipe pipe)
 {
-       uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+       u32 tmp = I915_READ(PORT_DFT2_G4X);
 
        switch (pipe) {
        case PIPE_A:
@@ -289,7 +289,7 @@ static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
 static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
                                         enum pipe pipe)
 {
-       uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+       u32 tmp = I915_READ(PORT_DFT2_G4X);
 
        if (pipe == PIPE_A)
                tmp &= ~PIPE_A_SCRAMBLE_RESET;
@@ -304,7 +304,7 @@ static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
 }
 
 static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
-                               uint32_t *val)
+                               u32 *val)
 {
        if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
                *source = INTEL_PIPE_CRC_SOURCE_PIPE;
@@ -392,7 +392,7 @@ unlock:
 static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
                                enum pipe pipe,
                                enum intel_pipe_crc_source *source,
-                               uint32_t *val,
+                               u32 *val,
                                bool set_wa)
 {
        if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
@@ -427,13 +427,13 @@ static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
                               enum intel_pipe_crc_source *source, u32 *val,
                               bool set_wa)
 {
-       if (IS_GEN2(dev_priv))
+       if (IS_GEN(dev_priv, 2))
                return i8xx_pipe_crc_ctl_reg(source, val);
        else if (INTEL_GEN(dev_priv) < 5)
                return i9xx_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
        else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                return vlv_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
-       else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
+       else if (IS_GEN_RANGE(dev_priv, 5, 6))
                return ilk_pipe_crc_ctl_reg(source, val);
        else
                return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val, set_wa);
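
The generation checks above follow one mechanical pattern across this series: per-generation IS_GENx() helpers become IS_GEN(dev_priv, n), and chained checks collapse into IS_GEN_RANGE(dev_priv, first, last). A toy user-space model of the two forms (the struct and macro bodies below are illustrative stand-ins; the real definitions live in i915_drv.h):

    #include <assert.h>

    struct toy_device { int gen; };

    /* stand-ins for the i915 helpers named in the diff above */
    #define IS_GEN(dev, n)          ((dev)->gen == (n))
    #define IS_GEN_RANGE(dev, s, e) ((dev)->gen >= (s) && (dev)->gen <= (e))

    int main(void)
    {
            struct toy_device ilk = { .gen = 5 };

            /* IS_GEN5(dev) || IS_GEN6(dev) becomes one range test */
            assert(IS_GEN_RANGE(&ilk, 5, 6));
            assert(!IS_GEN(&ilk, 2));
            return 0;
    }
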
@@ -544,13 +544,13 @@ static int
 intel_is_valid_crc_source(struct drm_i915_private *dev_priv,
                          const enum intel_pipe_crc_source source)
 {
-       if (IS_GEN2(dev_priv))
+       if (IS_GEN(dev_priv, 2))
                return i8xx_crc_source_valid(dev_priv, source);
        else if (INTEL_GEN(dev_priv) < 5)
                return i9xx_crc_source_valid(dev_priv, source);
        else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                return vlv_crc_source_valid(dev_priv, source);
-       else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
+       else if (IS_GEN_RANGE(dev_priv, 5, 6))
                return ilk_crc_source_valid(dev_priv, source);
        else
                return ivb_crc_source_valid(dev_priv, source);
@@ -589,6 +589,7 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
        enum intel_display_power_domain power_domain;
        enum intel_pipe_crc_source source;
+       intel_wakeref_t wakeref;
        u32 val = 0; /* shut up gcc */
        int ret = 0;
 
@@ -598,7 +599,8 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
        }
 
        power_domain = POWER_DOMAIN_PIPE(crtc->index);
-       if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
+       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+       if (!wakeref) {
                DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
                return -EIO;
        }
@@ -624,7 +626,7 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
        pipe_crc->skipped = 0;
 
 out:
-       intel_display_power_put(dev_priv, power_domain);
+       intel_display_power_put(dev_priv, power_domain, wakeref);
 
        return ret;
 }
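
The wakeref change above turns an untracked get/put pair into one that threads a cookie from intel_display_power_get_if_enabled() through to intel_display_power_put(), so an unbalanced reference can be attributed to its call site. A self-contained toy model of the pattern (types and values below are made up; only the call shape mirrors the diff):

    #include <assert.h>

    typedef unsigned long intel_wakeref_t;   /* opaque cookie, 0 == failure */

    static intel_wakeref_t power_get_if_enabled(int power_well_on)
    {
            return power_well_on ? 0x1UL : 0;
    }

    static void power_put(intel_wakeref_t wakeref)
    {
            assert(wakeref);   /* release exactly what get() returned */
    }

    int main(void)
    {
            intel_wakeref_t wakeref = power_get_if_enabled(1);

            if (!wakeref)
                    return 1;  /* mirrors the -EIO early return above */
            /* ... program the CRC source while the pipe is powered ... */
            power_put(wakeref);
            return 0;
    }
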
index a26b4eddda252659d94f5c6cab89b895ddf348a9..0f15685529a01a4e10e337d957062d544b645f0d 100644 (file)
@@ -480,7 +480,7 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
        int sprite0_start, sprite1_start;
 
        switch (pipe) {
-               uint32_t dsparb, dsparb2, dsparb3;
+               u32 dsparb, dsparb2, dsparb3;
        case PIPE_A:
                dsparb = I915_READ(DSPARB);
                dsparb2 = I915_READ(DSPARB2);
@@ -513,7 +513,7 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
 static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
                              enum i9xx_plane_id i9xx_plane)
 {
-       uint32_t dsparb = I915_READ(DSPARB);
+       u32 dsparb = I915_READ(DSPARB);
        int size;
 
        size = dsparb & 0x7f;
@@ -529,7 +529,7 @@ static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
 static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
                              enum i9xx_plane_id i9xx_plane)
 {
-       uint32_t dsparb = I915_READ(DSPARB);
+       u32 dsparb = I915_READ(DSPARB);
        int size;
 
        size = dsparb & 0x1ff;
@@ -546,7 +546,7 @@ static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
 static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
                              enum i9xx_plane_id i9xx_plane)
 {
-       uint32_t dsparb = I915_READ(DSPARB);
+       u32 dsparb = I915_READ(DSPARB);
        int size;
 
        size = dsparb & 0x7f;
@@ -667,9 +667,9 @@ static unsigned int intel_wm_method1(unsigned int pixel_rate,
                                     unsigned int cpp,
                                     unsigned int latency)
 {
-       uint64_t ret;
+       u64 ret;
 
-       ret = (uint64_t) pixel_rate * cpp * latency;
+       ret = (u64)pixel_rate * cpp * latency;
        ret = DIV_ROUND_UP_ULL(ret, 10000);
 
        return ret;
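
The cast in intel_wm_method1() matters because pixel_rate * cpp * latency can exceed 32 bits; widening one operand forces the whole product into 64-bit arithmetic before DIV_ROUND_UP_ULL divides it back down. A standalone demonstration with hypothetical numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t pixel_rate = 600000;  /* hypothetical: kHz */
            uint32_t cpp = 4, latency = 3000;

            /* all-32-bit product wraps: 7,200,000,000 doesn't fit in u32 */
            uint64_t wrapped = pixel_rate * cpp * latency;
            /* widening the first operand keeps the full value */
            uint64_t full = (uint64_t)pixel_rate * cpp * latency;

            printf("wrapped=%llu full=%llu\n",
                   (unsigned long long)wrapped, (unsigned long long)full);
            return 0;
    }
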
@@ -1089,9 +1089,9 @@ static int g4x_fbc_fifo_size(int level)
        }
 }
 
-static uint16_t g4x_compute_wm(const struct intel_crtc_state *crtc_state,
-                              const struct intel_plane_state *plane_state,
-                              int level)
+static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
+                         const struct intel_plane_state *plane_state,
+                         int level)
 {
        struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@@ -1188,9 +1188,9 @@ static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
        return dirty;
 }
 
-static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
-                                  const struct intel_plane_state *pstate,
-                                  uint32_t pri_val);
+static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
+                             const struct intel_plane_state *pstate,
+                             u32 pri_val);
 
 static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
                                     const struct intel_plane_state *plane_state)
@@ -1399,10 +1399,9 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
        return 0;
 }
 
-static int g4x_compute_intermediate_wm(struct drm_device *dev,
-                                      struct intel_crtc *crtc,
-                                      struct intel_crtc_state *new_crtc_state)
+static int g4x_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
 {
+       struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
        struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
        const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
        struct intel_atomic_state *intel_state =
@@ -1599,9 +1598,9 @@ static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
        }
 }
 
-static uint16_t vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
-                                    const struct intel_plane_state *plane_state,
-                                    int level)
+static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
+                               const struct intel_plane_state *plane_state,
+                               int level)
 {
        struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@@ -1969,7 +1968,7 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
        spin_lock(&dev_priv->uncore.lock);
 
        switch (crtc->pipe) {
-               uint32_t dsparb, dsparb2, dsparb3;
+               u32 dsparb, dsparb2, dsparb3;
        case PIPE_A:
                dsparb = I915_READ_FW(DSPARB);
                dsparb2 = I915_READ_FW(DSPARB2);
@@ -2032,10 +2031,9 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
 
 #undef VLV_FIFO
 
-static int vlv_compute_intermediate_wm(struct drm_device *dev,
-                                      struct intel_crtc *crtc,
-                                      struct intel_crtc_state *new_crtc_state)
+static int vlv_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
 {
+       struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
        struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
        const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
        struct intel_atomic_state *intel_state =
@@ -2264,8 +2262,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
 {
        struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
        const struct intel_watermark_params *wm_info;
-       uint32_t fwater_lo;
-       uint32_t fwater_hi;
+       u32 fwater_lo;
+       u32 fwater_hi;
        int cwm, srwm = 1;
        int fifo_size;
        int planea_wm, planeb_wm;
@@ -2273,7 +2271,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
 
        if (IS_I945GM(dev_priv))
                wm_info = &i945_wm_info;
-       else if (!IS_GEN2(dev_priv))
+       else if (!IS_GEN(dev_priv, 2))
                wm_info = &i915_wm_info;
        else
                wm_info = &i830_a_wm_info;
@@ -2287,7 +2285,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
                        crtc->base.primary->state->fb;
                int cpp;
 
-               if (IS_GEN2(dev_priv))
+               if (IS_GEN(dev_priv, 2))
                        cpp = 4;
                else
                        cpp = fb->format->cpp[0];
@@ -2302,7 +2300,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
                        planea_wm = wm_info->max_wm;
        }
 
-       if (IS_GEN2(dev_priv))
+       if (IS_GEN(dev_priv, 2))
                wm_info = &i830_bc_wm_info;
 
        fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_B);
@@ -2314,7 +2312,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
                        crtc->base.primary->state->fb;
                int cpp;
 
-               if (IS_GEN2(dev_priv))
+               if (IS_GEN(dev_priv, 2))
                        cpp = 4;
                else
                        cpp = fb->format->cpp[0];
@@ -2408,7 +2406,7 @@ static void i845_update_wm(struct intel_crtc *unused_crtc)
        struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
        struct intel_crtc *crtc;
        const struct drm_display_mode *adjusted_mode;
-       uint32_t fwater_lo;
+       u32 fwater_lo;
        int planea_wm;
 
        crtc = single_enabled_crtc(dev_priv);
@@ -2457,8 +2455,7 @@ static unsigned int ilk_wm_method2(unsigned int pixel_rate,
        return ret;
 }
 
-static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
-                          uint8_t cpp)
+static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp)
 {
        /*
         * Neither of these should be possible since this function shouldn't be
@@ -2475,22 +2472,21 @@ static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
 }
 
 struct ilk_wm_maximums {
-       uint16_t pri;
-       uint16_t spr;
-       uint16_t cur;
-       uint16_t fbc;
+       u16 pri;
+       u16 spr;
+       u16 cur;
+       u16 fbc;
 };
 
 /*
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
  */
-static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
-                                  const struct intel_plane_state *pstate,
-                                  uint32_t mem_value,
-                                  bool is_lp)
+static u32 ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
+                             const struct intel_plane_state *pstate,
+                             u32 mem_value, bool is_lp)
 {
-       uint32_t method1, method2;
+       u32 method1, method2;
        int cpp;
 
        if (mem_value == 0)
@@ -2518,11 +2514,11 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
  */
-static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
-                                  const struct intel_plane_state *pstate,
-                                  uint32_t mem_value)
+static u32 ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
+                             const struct intel_plane_state *pstate,
+                             u32 mem_value)
 {
-       uint32_t method1, method2;
+       u32 method1, method2;
        int cpp;
 
        if (mem_value == 0)
@@ -2545,9 +2541,9 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
  */
-static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
-                                  const struct intel_plane_state *pstate,
-                                  uint32_t mem_value)
+static u32 ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
+                             const struct intel_plane_state *pstate,
+                             u32 mem_value)
 {
        int cpp;
 
@@ -2565,9 +2561,9 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
 }
 
 /* Only for WM_LP. */
-static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
-                                  const struct intel_plane_state *pstate,
-                                  uint32_t pri_val)
+static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
+                             const struct intel_plane_state *pstate,
+                             u32 pri_val)
 {
        int cpp;
 
@@ -2626,13 +2622,12 @@ static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
 }
 
 /* Calculate the maximum primary/sprite plane watermark */
-static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
+static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
                                     int level,
                                     const struct intel_wm_config *config,
                                     enum intel_ddb_partitioning ddb_partitioning,
                                     bool is_sprite)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
 
        /* if sprites aren't enabled, sprites get nothing */
@@ -2668,7 +2663,7 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
 }
 
 /* Calculate the maximum cursor plane watermark */
-static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
+static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
                                      int level,
                                      const struct intel_wm_config *config)
 {
@@ -2677,19 +2672,19 @@ static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
                return 64;
 
        /* otherwise just report max that registers can hold */
-       return ilk_cursor_wm_reg_max(to_i915(dev), level);
+       return ilk_cursor_wm_reg_max(dev_priv, level);
 }
 
-static void ilk_compute_wm_maximums(const struct drm_device *dev,
+static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
                                    int level,
                                    const struct intel_wm_config *config,
                                    enum intel_ddb_partitioning ddb_partitioning,
                                    struct ilk_wm_maximums *max)
 {
-       max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
-       max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
-       max->cur = ilk_cursor_wm_max(dev, level, config);
-       max->fbc = ilk_fbc_wm_reg_max(to_i915(dev));
+       max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
+       max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
+       max->cur = ilk_cursor_wm_max(dev_priv, level, config);
+       max->fbc = ilk_fbc_wm_reg_max(dev_priv);
 }
 
 static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
@@ -2734,9 +2729,9 @@ static bool ilk_validate_wm_level(int level,
                        DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
                                      level, result->cur_val, max->cur);
 
-               result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
-               result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
-               result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
+               result->pri_val = min_t(u32, result->pri_val, max->pri);
+               result->spr_val = min_t(u32, result->spr_val, max->spr);
+               result->cur_val = min_t(u32, result->cur_val, max->cur);
                result->enable = true;
        }
 
@@ -2752,9 +2747,9 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
                                 const struct intel_plane_state *curstate,
                                 struct intel_wm_level *result)
 {
-       uint16_t pri_latency = dev_priv->wm.pri_latency[level];
-       uint16_t spr_latency = dev_priv->wm.spr_latency[level];
-       uint16_t cur_latency = dev_priv->wm.cur_latency[level];
+       u16 pri_latency = dev_priv->wm.pri_latency[level];
+       u16 spr_latency = dev_priv->wm.spr_latency[level];
+       u16 cur_latency = dev_priv->wm.cur_latency[level];
 
        /* WM1+ latency values stored in 0.5us units */
        if (level > 0) {
@@ -2778,7 +2773,7 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
        result->enable = true;
 }
 
-static uint32_t
+static u32
 hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
 {
        const struct intel_atomic_state *intel_state =
@@ -2807,10 +2802,10 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
 }
 
 static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
-                                 uint16_t wm[8])
+                                 u16 wm[8])
 {
        if (INTEL_GEN(dev_priv) >= 9) {
-               uint32_t val;
+               u32 val;
                int ret, i;
                int level, max_level = ilk_wm_max_level(dev_priv);
 
@@ -2894,7 +2889,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
                        wm[0] += 1;
 
        } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-               uint64_t sskpd = I915_READ64(MCH_SSKPD);
+               u64 sskpd = I915_READ64(MCH_SSKPD);
 
                wm[0] = (sskpd >> 56) & 0xFF;
                if (wm[0] == 0)
@@ -2904,14 +2899,14 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
                wm[3] = (sskpd >> 20) & 0x1FF;
                wm[4] = (sskpd >> 32) & 0x1FF;
        } else if (INTEL_GEN(dev_priv) >= 6) {
-               uint32_t sskpd = I915_READ(MCH_SSKPD);
+               u32 sskpd = I915_READ(MCH_SSKPD);
 
                wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
                wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
                wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
                wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
        } else if (INTEL_GEN(dev_priv) >= 5) {
-               uint32_t mltr = I915_READ(MLTR_ILK);
+               u32 mltr = I915_READ(MLTR_ILK);
 
                /* ILK primary LP0 latency is 700 ns */
                wm[0] = 7;
@@ -2923,18 +2918,18 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
 }
 
 static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
-                                      uint16_t wm[5])
+                                      u16 wm[5])
 {
        /* ILK sprite LP0 latency is 1300 ns */
-       if (IS_GEN5(dev_priv))
+       if (IS_GEN(dev_priv, 5))
                wm[0] = 13;
 }
 
 static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
-                                      uint16_t wm[5])
+                                      u16 wm[5])
 {
        /* ILK cursor LP0 latency is 1300 ns */
-       if (IS_GEN5(dev_priv))
+       if (IS_GEN(dev_priv, 5))
                wm[0] = 13;
 }
 
@@ -2953,7 +2948,7 @@ int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
 
 static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
                                   const char *name,
-                                  const uint16_t wm[8])
+                                  const u16 wm[8])
 {
        int level, max_level = ilk_wm_max_level(dev_priv);
 
@@ -2982,7 +2977,7 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
 }
 
 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
-                                   uint16_t wm[5], uint16_t min)
+                                   u16 wm[5], u16 min)
 {
        int level, max_level = ilk_wm_max_level(dev_priv);
 
@@ -2991,7 +2986,7 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
 
        wm[0] = max(wm[0], min);
        for (level = 1; level <= max_level; level++)
-               wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
+               wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));
 
        return true;
 }
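
The DIV_ROUND_UP(min, 5) above is a unit conversion: the quirk minimum is expressed in 0.1 us steps, while (per the comment in ilk_compute_wm_level earlier in this diff) WM1+ latency values are stored in 0.5 us units, so the raised floor is divided by five, rounding up so the effective latency never shrinks. In isolation:

    #include <assert.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int min = 12;  /* hypothetical floor, in 0.1us units */

            /* 1.2us expressed in the 0.5us units WM1+ uses, rounded up */
            assert(DIV_ROUND_UP(min, 5) == 3);  /* 3 * 0.5us >= 1.2us */
            return 0;
    }
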
@@ -3061,7 +3056,7 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
        intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
        intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
 
-       if (IS_GEN6(dev_priv)) {
+       if (IS_GEN(dev_priv, 6)) {
                snb_wm_latency_quirk(dev_priv);
                snb_wm_lp3_irq_quirk(dev_priv);
        }
@@ -3073,7 +3068,7 @@ static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
        intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
 }
 
-static bool ilk_validate_pipe_wm(struct drm_device *dev,
+static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
                                 struct intel_pipe_wm *pipe_wm)
 {
        /* LP0 watermark maximums depend on this pipe alone */
@@ -3085,7 +3080,7 @@ static bool ilk_validate_pipe_wm(struct drm_device *dev,
        struct ilk_wm_maximums max;
 
        /* LP0 watermarks always use 1/2 DDB partitioning */
-       ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
+       ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);
 
        /* At least LP0 must be valid */
        if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
@@ -3150,7 +3145,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
 
-       if (!ilk_validate_pipe_wm(dev, pipe_wm))
+       if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
                return -EINVAL;
 
        ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
@@ -3180,17 +3175,17 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
  * state and the new state.  These can be programmed to the hardware
  * immediately.
  */
-static int ilk_compute_intermediate_wm(struct drm_device *dev,
-                                      struct intel_crtc *intel_crtc,
-                                      struct intel_crtc_state *newstate)
+static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate)
 {
+       struct intel_crtc *intel_crtc = to_intel_crtc(newstate->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
        struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
        struct intel_atomic_state *intel_state =
                to_intel_atomic_state(newstate->base.state);
        const struct intel_crtc_state *oldstate =
                intel_atomic_get_old_crtc_state(intel_state, intel_crtc);
        const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal;
-       int level, max_level = ilk_wm_max_level(to_i915(dev));
+       int level, max_level = ilk_wm_max_level(dev_priv);
 
        /*
         * Start with the final, target watermarks, then combine with the
@@ -3223,7 +3218,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
         * there's no safe way to transition from the old state to
         * the new state, so we need to fail the atomic transaction.
         */
-       if (!ilk_validate_pipe_wm(dev, a))
+       if (!ilk_validate_pipe_wm(dev_priv, a))
                return -EINVAL;
 
        /*
@@ -3239,7 +3234,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
 /*
  * Merge the watermarks from all active pipes for a specific level.
  */
-static void ilk_merge_wm_level(struct drm_device *dev,
+static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
                               int level,
                               struct intel_wm_level *ret_wm)
 {
@@ -3247,7 +3242,7 @@ static void ilk_merge_wm_level(struct drm_device *dev,
 
        ret_wm->enable = true;
 
-       for_each_intel_crtc(dev, intel_crtc) {
+       for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
                const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
                const struct intel_wm_level *wm = &active->wm[level];
 
@@ -3272,12 +3267,11 @@ static void ilk_merge_wm_level(struct drm_device *dev,
 /*
  * Merge all low power watermarks for all active pipes.
  */
-static void ilk_wm_merge(struct drm_device *dev,
+static void ilk_wm_merge(struct drm_i915_private *dev_priv,
                         const struct intel_wm_config *config,
                         const struct ilk_wm_maximums *max,
                         struct intel_pipe_wm *merged)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        int level, max_level = ilk_wm_max_level(dev_priv);
        int last_enabled_level = max_level;
 
@@ -3293,7 +3287,7 @@ static void ilk_wm_merge(struct drm_device *dev,
        for (level = 1; level <= max_level; level++) {
                struct intel_wm_level *wm = &merged->wm[level];
 
-               ilk_merge_wm_level(dev, level, wm);
+               ilk_merge_wm_level(dev_priv, level, wm);
 
                if (level > last_enabled_level)
                        wm->enable = false;
@@ -3318,7 +3312,7 @@ static void ilk_wm_merge(struct drm_device *dev,
         * What we should check here is whether FBC can be
         * enabled sometime later.
         */
-       if (IS_GEN5(dev_priv) && !merged->fbc_wm_enabled &&
+       if (IS_GEN(dev_priv, 5) && !merged->fbc_wm_enabled &&
            intel_fbc_is_active(dev_priv)) {
                for (level = 2; level <= max_level; level++) {
                        struct intel_wm_level *wm = &merged->wm[level];
@@ -3335,22 +3329,20 @@ static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
 }
 
 /* The value we need to program into the WM_LPx latency field */
-static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
+static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
+                                     int level)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                return 2 * level;
        else
                return dev_priv->wm.pri_latency[level];
 }
 
-static void ilk_compute_wm_results(struct drm_device *dev,
+static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
                                   const struct intel_pipe_wm *merged,
                                   enum intel_ddb_partitioning partitioning,
                                   struct ilk_wm_values *results)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc;
        int level, wm_lp;
 
@@ -3370,7 +3362,7 @@ static void ilk_compute_wm_results(struct drm_device *dev,
                 * disabled. Doing otherwise could cause underruns.
                 */
                results->wm_lp[wm_lp - 1] =
-                       (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
+                       (ilk_wm_lp_latency(dev_priv, level) << WM1_LP_LATENCY_SHIFT) |
                        (r->pri_val << WM1_LP_SR_SHIFT) |
                        r->cur_val;
 
@@ -3396,7 +3388,7 @@ static void ilk_compute_wm_results(struct drm_device *dev,
        }
 
        /* LP0 register values */
-       for_each_intel_crtc(dev, intel_crtc) {
+       for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
                enum pipe pipe = intel_crtc->pipe;
                const struct intel_wm_level *r =
                        &intel_crtc->wm.active.ilk.wm[0];
@@ -3415,11 +3407,12 @@ static void ilk_compute_wm_results(struct drm_device *dev,
 
 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
  * case both are at the same level. Prefer r1 in case they're the same. */
-static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
-                                                 struct intel_pipe_wm *r1,
-                                                 struct intel_pipe_wm *r2)
+static struct intel_pipe_wm *
+ilk_find_best_result(struct drm_i915_private *dev_priv,
+                    struct intel_pipe_wm *r1,
+                    struct intel_pipe_wm *r2)
 {
-       int level, max_level = ilk_wm_max_level(to_i915(dev));
+       int level, max_level = ilk_wm_max_level(dev_priv);
        int level1 = 0, level2 = 0;
 
        for (level = 1; level <= max_level; level++) {
@@ -3540,7 +3533,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
 {
        struct ilk_wm_values *previous = &dev_priv->wm.hw;
        unsigned int dirty;
-       uint32_t val;
+       u32 val;
 
        dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
        if (!dirty)
@@ -3638,14 +3631,9 @@ static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
 * FIXME: We still don't have the proper code to detect if we need to apply the WA,
  * so assume we'll always need it in order to avoid underruns.
  */
-static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
+static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-
-       if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv))
-               return true;
-
-       return false;
+       return IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv);
 }
 
 static bool
@@ -3677,25 +3665,25 @@ intel_enable_sagv(struct drm_i915_private *dev_priv)
        if (dev_priv->sagv_status == I915_SAGV_ENABLED)
                return 0;
 
-       DRM_DEBUG_KMS("Enabling the SAGV\n");
+       DRM_DEBUG_KMS("Enabling SAGV\n");
        mutex_lock(&dev_priv->pcu_lock);
 
        ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
                                      GEN9_SAGV_ENABLE);
 
-       /* We don't need to wait for the SAGV when enabling */
+       /* We don't need to wait for SAGV when enabling */
        mutex_unlock(&dev_priv->pcu_lock);
 
        /*
         * Some skl systems, pre-release machines in particular,
-        * don't actually have an SAGV.
+        * don't actually have SAGV.
         */
        if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
                DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
                dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
                return 0;
        } else if (ret < 0) {
-               DRM_ERROR("Failed to enable the SAGV\n");
+               DRM_ERROR("Failed to enable SAGV\n");
                return ret;
        }
 
@@ -3714,7 +3702,7 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
        if (dev_priv->sagv_status == I915_SAGV_DISABLED)
                return 0;
 
-       DRM_DEBUG_KMS("Disabling the SAGV\n");
+       DRM_DEBUG_KMS("Disabling SAGV\n");
        mutex_lock(&dev_priv->pcu_lock);
 
        /* bspec says to keep retrying for at least 1 ms */
@@ -3726,14 +3714,14 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
 
        /*
         * Some skl systems, pre-release machines in particular,
-        * don't actually have an SAGV.
+        * don't actually have SAGV.
         */
        if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
                DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
                dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
                return 0;
        } else if (ret < 0) {
-               DRM_ERROR("Failed to disable the SAGV (%d)\n", ret);
+               DRM_ERROR("Failed to disable SAGV (%d)\n", ret);
                return ret;
        }
 
@@ -3756,15 +3744,15 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
        if (!intel_has_sagv(dev_priv))
                return false;
 
-       if (IS_GEN9(dev_priv))
+       if (IS_GEN(dev_priv, 9))
                sagv_block_time_us = 30;
-       else if (IS_GEN10(dev_priv))
+       else if (IS_GEN(dev_priv, 10))
                sagv_block_time_us = 20;
        else
                sagv_block_time_us = 10;
 
        /*
-        * SKL+ workaround: bspec recommends we disable the SAGV when we have
+        * SKL+ workaround: bspec recommends we disable SAGV when we have
         * more than one pipe enabled
         *
         * If there are no active CRTCs, no additional checks need be performed
@@ -3797,7 +3785,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
 
                latency = dev_priv->wm.skl_latency[level];
 
-               if (skl_needs_memory_bw_wa(intel_state) &&
+               if (skl_needs_memory_bw_wa(dev_priv) &&
                    plane->base.state->fb->modifier ==
                    I915_FORMAT_MOD_X_TILED)
                        latency += 15;
@@ -3805,7 +3793,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
                /*
                 * If any of the planes on this pipe don't enable wm levels that
                 * incur memory latencies higher than sagv_block_time_us we
-                * can't enable the SAGV.
+                * can't enable SAGV.
                 */
                if (latency < sagv_block_time_us)
                        return false;
@@ -3834,8 +3822,13 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
 
        /*
         * 12GB/s is maximum BW supported by single DBuf slice.
+        *
+        * FIXME dbuf slice code is broken:
+        * - must wait for planes to stop using the slice before powering it off
+        * - plane straddling both slices is illegal in multi-pipe scenarios
+        * - should validate we stay within the hw bandwidth limits
         */
-       if (num_active > 1 || total_data_bw >= GBps(12)) {
+       if (0 && (num_active > 1 || total_data_bw >= GBps(12))) {
                ddb->enabled_slices = 2;
        } else {
                ddb->enabled_slices = 1;
@@ -3934,14 +3927,9 @@ static unsigned int skl_cursor_allocation(int num_active)
 static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv,
                                       struct skl_ddb_entry *entry, u32 reg)
 {
-       u16 mask;
 
-       if (INTEL_GEN(dev_priv) >= 11)
-               mask = ICL_DDB_ENTRY_MASK;
-       else
-               mask = SKL_DDB_ENTRY_MASK;
-       entry->start = reg & mask;
-       entry->end = (reg >> DDB_ENTRY_END_SHIFT) & mask;
+       entry->start = reg & DDB_ENTRY_MASK;
+       entry->end = (reg >> DDB_ENTRY_END_SHIFT) & DDB_ENTRY_MASK;
 
        if (entry->end)
                entry->end += 1;
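
The simplified skl_ddb_entry_init_from_hw() above is a plain bitfield decode: the start block in the low bits, the end block above DDB_ENTRY_END_SHIFT, plus one because the hardware end is inclusive while the software entry is exclusive. A user-space sketch with an assumed field layout (the real DDB_ENTRY_MASK and DDB_ENTRY_END_SHIFT values live in i915_reg.h):

    #include <assert.h>
    #include <stdint.h>

    #define DDB_ENTRY_MASK       0x7ff  /* assumed width, for illustration */
    #define DDB_ENTRY_END_SHIFT  16

    int main(void)
    {
            uint32_t reg = (200u << DDB_ENTRY_END_SHIFT) | 100u;
            uint16_t start = reg & DDB_ENTRY_MASK;
            uint16_t end = (reg >> DDB_ENTRY_END_SHIFT) & DDB_ENTRY_MASK;

            if (end)
                    end += 1;  /* inclusive hw end -> exclusive sw end */

            assert(start == 100 && end == 201);
            return 0;
    }
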
@@ -3994,10 +3982,12 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum intel_display_power_domain power_domain;
        enum pipe pipe = crtc->pipe;
+       intel_wakeref_t wakeref;
        enum plane_id plane_id;
 
        power_domain = POWER_DOMAIN_PIPE(pipe);
-       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+       if (!wakeref)
                return;
 
        for_each_plane_id_on_crtc(crtc, plane_id)
@@ -4006,7 +3996,7 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
                                           &ddb_y[plane_id],
                                           &ddb_uv[plane_id]);
 
-       intel_display_power_put(dev_priv, power_domain);
+       intel_display_power_put(dev_priv, power_domain, wakeref);
 }
 
 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
@@ -4036,7 +4026,7 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
                           const struct intel_plane_state *pstate)
 {
        struct intel_plane *plane = to_intel_plane(pstate->base.plane);
-       uint32_t src_w, src_h, dst_w, dst_h;
+       u32 src_w, src_h, dst_w, dst_h;
        uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
        uint_fixed_16_16_t downscale_h, downscale_w;
 
@@ -4082,8 +4072,8 @@ skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state)
                return pipe_downscale;
 
        if (crtc_state->pch_pfit.enabled) {
-               uint32_t src_w, src_h, dst_w, dst_h;
-               uint32_t pfit_size = crtc_state->pch_pfit.size;
+               u32 src_w, src_h, dst_w, dst_h;
+               u32 pfit_size = crtc_state->pch_pfit.size;
                uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
                uint_fixed_16_16_t downscale_h, downscale_w;
 
@@ -4116,7 +4106,7 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
        const struct drm_plane_state *pstate;
        struct intel_plane_state *intel_pstate;
        int crtc_clock, dotclk;
-       uint32_t pipe_max_pixel_rate;
+       u32 pipe_max_pixel_rate;
        uint_fixed_16_16_t pipe_downscale;
        uint_fixed_16_16_t max_downscale = u32_to_fixed16(1);
 
@@ -4172,8 +4162,8 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
 {
        struct intel_plane *intel_plane =
                to_intel_plane(intel_pstate->base.plane);
-       uint32_t data_rate;
-       uint32_t width = 0, height = 0;
+       u32 data_rate;
+       u32 width = 0, height = 0;
        struct drm_framebuffer *fb;
        u32 format;
        uint_fixed_16_16_t down_scale_amount;
@@ -4306,102 +4296,6 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
        return total_data_rate;
 }
 
-static uint16_t
-skl_ddb_min_alloc(const struct drm_plane_state *pstate, const int plane)
-{
-       struct drm_framebuffer *fb = pstate->fb;
-       struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
-       uint32_t src_w, src_h;
-       uint32_t min_scanlines = 8;
-       uint8_t plane_bpp;
-
-       if (WARN_ON(!fb))
-               return 0;
-
-       /* For packed formats, and uv-plane, return 0 */
-       if (plane == 1 && fb->format->format != DRM_FORMAT_NV12)
-               return 0;
-
-       /* For Non Y-tile return 8-blocks */
-       if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
-           fb->modifier != I915_FORMAT_MOD_Yf_TILED &&
-           fb->modifier != I915_FORMAT_MOD_Y_TILED_CCS &&
-           fb->modifier != I915_FORMAT_MOD_Yf_TILED_CCS)
-               return 8;
-
-       /*
-        * Src coordinates are already rotated by 270 degrees for
-        * the 90/270 degree plane rotation cases (to match the
-        * GTT mapping), hence no need to account for rotation here.
-        */
-       src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
-       src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
-
-       /* Halve UV plane width and height for NV12 */
-       if (plane == 1) {
-               src_w /= 2;
-               src_h /= 2;
-       }
-
-       plane_bpp = fb->format->cpp[plane];
-
-       if (drm_rotation_90_or_270(pstate->rotation)) {
-               switch (plane_bpp) {
-               case 1:
-                       min_scanlines = 32;
-                       break;
-               case 2:
-                       min_scanlines = 16;
-                       break;
-               case 4:
-                       min_scanlines = 8;
-                       break;
-               case 8:
-                       min_scanlines = 4;
-                       break;
-               default:
-                       WARN(1, "Unsupported pixel depth %u for rotation",
-                            plane_bpp);
-                       min_scanlines = 32;
-               }
-       }
-
-       return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
-}
-
-static void
-skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
-                uint16_t *minimum, uint16_t *uv_minimum)
-{
-       const struct drm_plane_state *pstate;
-       struct drm_plane *plane;
-
-       drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) {
-               enum plane_id plane_id = to_intel_plane(plane)->id;
-               struct intel_plane_state *plane_state = to_intel_plane_state(pstate);
-
-               if (plane_id == PLANE_CURSOR)
-                       continue;
-
-               /* slave plane must be invisible and calculated from master */
-               if (!pstate->visible || WARN_ON(plane_state->slave))
-                       continue;
-
-               if (!plane_state->linked_plane) {
-                       minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
-                       uv_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
-               } else {
-                       enum plane_id y_plane_id =
-                               plane_state->linked_plane->id;
-
-                       minimum[y_plane_id] = skl_ddb_min_alloc(pstate, 0);
-                       minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
-               }
-       }
-
-       minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
-}
-
 static int
 skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
                      struct skl_ddb_allocation *ddb /* out */)
@@ -4411,15 +4305,17 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
-       uint16_t alloc_size, start;
-       uint16_t minimum[I915_MAX_PLANES] = {};
-       uint16_t uv_minimum[I915_MAX_PLANES] = {};
+       struct skl_plane_wm *wm;
+       u16 alloc_size, start = 0;
+       u16 total[I915_MAX_PLANES] = {};
+       u16 uv_total[I915_MAX_PLANES] = {};
        u64 total_data_rate;
        enum plane_id plane_id;
        int num_active;
        u64 plane_data_rate[I915_MAX_PLANES] = {};
        u64 uv_plane_data_rate[I915_MAX_PLANES] = {};
-       uint16_t total_min_blocks = 0;
+       u32 blocks;
+       int level;
 
        /* Clear the partitioning for disabled planes. */
        memset(cstate->wm.skl.plane_ddb_y, 0, sizeof(cstate->wm.skl.plane_ddb_y));
@@ -4449,81 +4345,135 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
        if (alloc_size == 0)
                return 0;
 
-       skl_ddb_calc_min(cstate, num_active, minimum, uv_minimum);
+       /* Allocate fixed number of blocks for cursor. */
+       total[PLANE_CURSOR] = skl_cursor_allocation(num_active);
+       alloc_size -= total[PLANE_CURSOR];
+       cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
+               alloc->end - total[PLANE_CURSOR];
+       cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
+
+       if (total_data_rate == 0)
+               return 0;
 
        /*
-        * 1. Allocate the mininum required blocks for each active plane
-        * and allocate the cursor, it doesn't require extra allocation
-        * proportional to the data rate.
+        * Find the highest watermark level for which we can satisfy the block
+        * requirement of active planes.
         */
+       for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
+               blocks = 0;
+               for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+                       if (plane_id == PLANE_CURSOR)
+                               continue;
 
-       for_each_plane_id_on_crtc(intel_crtc, plane_id) {
-               total_min_blocks += minimum[plane_id];
-               total_min_blocks += uv_minimum[plane_id];
+                       wm = &cstate->wm.skl.optimal.planes[plane_id];
+                       blocks += wm->wm[level].min_ddb_alloc;
+                       blocks += wm->uv_wm[level].min_ddb_alloc;
+               }
+
+               if (blocks < alloc_size) {
+                       alloc_size -= blocks;
+                       break;
+               }
        }
 
-       if (total_min_blocks > alloc_size) {
+       if (level < 0) {
                DRM_DEBUG_KMS("Requested display configuration exceeds system DDB limitations");
-               DRM_DEBUG_KMS("minimum required %d/%d\n", total_min_blocks,
-                                                       alloc_size);
+               DRM_DEBUG_KMS("minimum required %d/%d\n", blocks,
+                             alloc_size);
                return -EINVAL;
        }
 
-       alloc_size -= total_min_blocks;
-       cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
-       cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
-
        /*
-        * 2. Distribute the remaining space in proportion to the amount of
-        * data each plane needs to fetch from memory.
-        *
-        * FIXME: we may not allocate every single block here.
+        * Grant each plane the blocks it requires at the highest achievable
+        * watermark level, plus an extra share of the leftover blocks
+        * proportional to its relative data rate.
         */
-       if (total_data_rate == 0)
-               return 0;
-
-       start = alloc->start;
        for_each_plane_id_on_crtc(intel_crtc, plane_id) {
-               u64 data_rate, uv_data_rate;
-               uint16_t plane_blocks, uv_plane_blocks;
+               u64 rate;
+               u16 extra;
 
                if (plane_id == PLANE_CURSOR)
                        continue;
 
-               data_rate = plane_data_rate[plane_id];
-
                /*
-                * allocation for (packed formats) or (uv-plane part of planar format):
-                * promote the expression to 64 bits to avoid overflowing, the
-                * result is < available as data_rate / total_data_rate < 1
+                * We've accounted for all active planes; remaining planes are
+                * all disabled.
                 */
-               plane_blocks = minimum[plane_id];
-               plane_blocks += div64_u64(alloc_size * data_rate, total_data_rate);
+               if (total_data_rate == 0)
+                       break;
 
-               /* Leave disabled planes at (0,0) */
-               if (data_rate) {
-                       cstate->wm.skl.plane_ddb_y[plane_id].start = start;
-                       cstate->wm.skl.plane_ddb_y[plane_id].end = start + plane_blocks;
-               }
+               wm = &cstate->wm.skl.optimal.planes[plane_id];
 
-               start += plane_blocks;
+               rate = plane_data_rate[plane_id];
+               extra = min_t(u16, alloc_size,
+                             DIV64_U64_ROUND_UP(alloc_size * rate,
+                                                total_data_rate));
+               total[plane_id] = wm->wm[level].min_ddb_alloc + extra;
+               alloc_size -= extra;
+               total_data_rate -= rate;
 
-               /* Allocate DDB for UV plane for planar format/NV12 */
-               uv_data_rate = uv_plane_data_rate[plane_id];
+               if (total_data_rate == 0)
+                       break;
+
+               rate = uv_plane_data_rate[plane_id];
+               extra = min_t(u16, alloc_size,
+                             DIV64_U64_ROUND_UP(alloc_size * rate,
+                                                total_data_rate));
+               uv_total[plane_id] = wm->uv_wm[level].min_ddb_alloc + extra;
+               alloc_size -= extra;
+               total_data_rate -= rate;
+       }
+       WARN_ON(alloc_size != 0 || total_data_rate != 0);
+
+       /* Set the actual DDB start/end points for each plane */
+       start = alloc->start;
+       for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+               struct skl_ddb_entry *plane_alloc, *uv_plane_alloc;
 
-               uv_plane_blocks = uv_minimum[plane_id];
-               uv_plane_blocks += div64_u64(alloc_size * uv_data_rate, total_data_rate);
+               if (plane_id == PLANE_CURSOR)
+                       continue;
+
+               plane_alloc = &cstate->wm.skl.plane_ddb_y[plane_id];
+               uv_plane_alloc = &cstate->wm.skl.plane_ddb_uv[plane_id];
 
                /* Gen11+ uses a separate plane for UV watermarks */
-               WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_plane_blocks);
+               WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]);
 
-               if (uv_data_rate) {
-                       cstate->wm.skl.plane_ddb_uv[plane_id].start = start;
-                       cstate->wm.skl.plane_ddb_uv[plane_id].end =
-                               start + uv_plane_blocks;
+               /* Leave disabled planes at (0,0) */
+               if (total[plane_id]) {
+                       plane_alloc->start = start;
+                       start += total[plane_id];
+                       plane_alloc->end = start;
                }
 
-               start += uv_plane_blocks;
+               if (uv_total[plane_id]) {
+                       uv_plane_alloc->start = start;
+                       start += uv_total[plane_id];
+                       uv_plane_alloc->end = start;
+               }
+       }
+
+       /*
+        * When we calculated watermark values we didn't know how high
+        * of a level we'd actually be able to hit, so we just marked
+        * all levels as "enabled."  Go back now and disable the ones
+        * that aren't actually possible.
+        */
+       for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
+               for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+                       wm = &cstate->wm.skl.optimal.planes[plane_id];
+                       memset(&wm->wm[level], 0, sizeof(wm->wm[level]));
+               }
+       }
+
+       /*
+        * Go back and disable the transition watermark if it turns out we
+        * don't have enough DDB blocks for it.
+        */
+       for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+               wm = &cstate->wm.skl.optimal.planes[plane_id];
+               if (wm->trans_wm.plane_res_b >= total[plane_id])
+                       memset(&wm->trans_wm, 0, sizeof(wm->trans_wm));
        }
 
        return 0;
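
The allocation loop above can be exercised outside the driver: after the cursor and the per-level minimums are set aside, each plane gets its minimum plus a rounded-up share of the leftover blocks proportional to its data rate, with alloc_size and total_data_rate decremented as it goes so the final WARN_ON can confirm everything was handed out. A standalone sketch with invented rates and block counts:

    #include <stdint.h>
    #include <stdio.h>

    #define DIV64_U64_ROUND_UP(a, b) (((a) + (b) - 1) / (b))

    int main(void)
    {
            uint16_t alloc_size = 100;            /* leftover DDB blocks */
            uint64_t rate[3] = { 300, 100, 100 }; /* invented data rates */
            uint16_t min_alloc[3] = { 10, 8, 8 }; /* min_ddb_alloc stand-ins */
            uint64_t total_data_rate = 500;

            for (int i = 0; i < 3; i++) {
                    uint64_t extra = DIV64_U64_ROUND_UP(alloc_size * rate[i],
                                                        total_data_rate);
                    if (extra > alloc_size)       /* min_t(u16, ...) above */
                            extra = alloc_size;

                    printf("plane %d: %u blocks\n", i,
                           (unsigned)(min_alloc[i] + extra));
                    alloc_size -= extra;
                    total_data_rate -= rate[i];
            }
            /* like the WARN_ON above: nothing should be left over */
            printf("leftover blocks=%u rate=%llu\n", alloc_size,
                   (unsigned long long)total_data_rate);
            return 0;
    }
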
@@ -4536,10 +4486,10 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
  * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
 */
 static uint_fixed_16_16_t
-skl_wm_method1(const struct drm_i915_private *dev_priv, uint32_t pixel_rate,
-              uint8_t cpp, uint32_t latency, uint32_t dbuf_block_size)
+skl_wm_method1(const struct drm_i915_private *dev_priv, u32 pixel_rate,
+              u8 cpp, u32 latency, u32 dbuf_block_size)
 {
-       uint32_t wm_intermediate_val;
+       u32 wm_intermediate_val;
        uint_fixed_16_16_t ret;
 
        if (latency == 0)
@@ -4554,12 +4504,11 @@ skl_wm_method1(const struct drm_i915_private *dev_priv, uint32_t pixel_rate,
        return ret;
 }
 
-static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
-                       uint32_t pipe_htotal,
-                       uint32_t latency,
-                       uint_fixed_16_16_t plane_blocks_per_line)
+static uint_fixed_16_16_t
+skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
+              uint_fixed_16_16_t plane_blocks_per_line)
 {
-       uint32_t wm_intermediate_val;
+       u32 wm_intermediate_val;
        uint_fixed_16_16_t ret;
 
        if (latency == 0)
@@ -4575,8 +4524,8 @@ static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
 static uint_fixed_16_16_t
 intel_get_linetime_us(const struct intel_crtc_state *cstate)
 {
-       uint32_t pixel_rate;
-       uint32_t crtc_htotal;
+       u32 pixel_rate;
+       u32 crtc_htotal;
        uint_fixed_16_16_t linetime_us;
 
        if (!cstate->base.active)
@@ -4593,11 +4542,11 @@ intel_get_linetime_us(const struct intel_crtc_state *cstate)
        return linetime_us;
 }
 
-static uint32_t
+static u32
 skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
                              const struct intel_plane_state *pstate)
 {
-       uint64_t adjusted_pixel_rate;
+       u64 adjusted_pixel_rate;
        uint_fixed_16_16_t downscale_amount;
 
        /* Shouldn't reach here on disabled planes... */
@@ -4624,10 +4573,7 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        const struct drm_plane_state *pstate = &intel_pstate->base;
        const struct drm_framebuffer *fb = pstate->fb;
-       uint32_t interm_pbpl;
-       struct intel_atomic_state *state =
-               to_intel_atomic_state(cstate->base.state);
-       bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
+       u32 interm_pbpl;
 
        /* only NV12 format has two planes */
        if (color_plane == 1 && fb->format->format != DRM_FORMAT_NV12) {
@@ -4663,7 +4609,7 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
                                                             intel_pstate);
 
        if (INTEL_GEN(dev_priv) >= 11 &&
-           fb->modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 8)
+           fb->modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
                wp->dbuf_block_size = 256;
        else
                wp->dbuf_block_size = 512;
@@ -4688,7 +4634,7 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
                wp->y_min_scanlines = 4;
        }
 
-       if (apply_memory_bw_wa)
+       if (skl_needs_memory_bw_wa(dev_priv))
                wp->y_min_scanlines *= 2;
 
        wp->plane_bytes_per_line = wp->width * wp->cpp;
@@ -4702,7 +4648,7 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
 
                wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
                                                        wp->y_min_scanlines);
-       } else if (wp->x_tiled && IS_GEN9(dev_priv)) {
+       } else if (wp->x_tiled && IS_GEN(dev_priv, 9)) {
                interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
                                           wp->dbuf_block_size);
                wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
@@ -4720,28 +4666,34 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
        return 0;
 }
 
-static int skl_compute_plane_wm(const struct intel_crtc_state *cstate,
-                               const struct intel_plane_state *intel_pstate,
-                               uint16_t ddb_allocation,
-                               int level,
-                               const struct skl_wm_params *wp,
-                               const struct skl_wm_level *result_prev,
-                               struct skl_wm_level *result /* out */)
+static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
+{
+       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+               return true;
+
+       /* The number of lines is ignored for the level 0 watermark. */
+       return level > 0;
+}
+
+static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
+                                const struct intel_plane_state *intel_pstate,
+                                int level,
+                                const struct skl_wm_params *wp,
+                                const struct skl_wm_level *result_prev,
+                                struct skl_wm_level *result /* out */)
 {
        struct drm_i915_private *dev_priv =
                to_i915(intel_pstate->base.plane->dev);
-       const struct drm_plane_state *pstate = &intel_pstate->base;
-       uint32_t latency = dev_priv->wm.skl_latency[level];
+       u32 latency = dev_priv->wm.skl_latency[level];
        uint_fixed_16_16_t method1, method2;
        uint_fixed_16_16_t selected_result;
-       uint32_t res_blocks, res_lines;
-       struct intel_atomic_state *state =
-               to_intel_atomic_state(cstate->base.state);
-       bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
-       uint32_t min_disp_buf_needed;
+       u32 res_blocks, res_lines, min_ddb_alloc = 0;
 
-       if (latency == 0)
-               return level == 0 ? -EINVAL : 0;
+       if (latency == 0) {
+               /* reject it */
+               result->min_ddb_alloc = U16_MAX;
+               return;
+       }
 
        /* Display WA #1141: kbl,cfl */
        if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
@@ -4749,7 +4701,7 @@ static int skl_compute_plane_wm(const struct intel_crtc_state *cstate,
            dev_priv->ipc_enabled)
                latency += 4;
 
-       if (apply_memory_bw_wa && wp->x_tiled)
+       if (skl_needs_memory_bw_wa(dev_priv) && wp->x_tiled)
                latency += 15;
 
        method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
@@ -4766,15 +4718,8 @@ static int skl_compute_plane_wm(const struct intel_crtc_state *cstate,
                     wp->dbuf_block_size < 1) &&
                     (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
                        selected_result = method2;
-               } else if (ddb_allocation >=
-                        fixed16_to_u32_round_up(wp->plane_blocks_per_line)) {
-                       if (IS_GEN9(dev_priv) &&
-                           !IS_GEMINILAKE(dev_priv))
-                               selected_result = min_fixed16(method1, method2);
-                       else
-                               selected_result = method2;
                } else if (latency >= wp->linetime_us) {
-                       if (IS_GEN9(dev_priv) &&
+                       if (IS_GEN(dev_priv, 9) &&
                            !IS_GEMINILAKE(dev_priv))
                                selected_result = min_fixed16(method1, method2);
                        else
@@ -4788,85 +4733,76 @@ static int skl_compute_plane_wm(const struct intel_crtc_state *cstate,
        res_lines = div_round_up_fixed16(selected_result,
                                         wp->plane_blocks_per_line);
 
-       /* Display WA #1125: skl,bxt,kbl,glk */
-       if (level == 0 && wp->rc_surface)
-               res_blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
+       if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) {
+               /* Display WA #1125: skl,bxt,kbl */
+               if (level == 0 && wp->rc_surface)
+                       res_blocks +=
+                               fixed16_to_u32_round_up(wp->y_tile_minimum);
+
+               /* Display WA #1126: skl,bxt,kbl */
+               if (level >= 1 && level <= 7) {
+                       if (wp->y_tiled) {
+                               res_blocks +=
+                                   fixed16_to_u32_round_up(wp->y_tile_minimum);
+                               res_lines += wp->y_min_scanlines;
+                       } else {
+                               res_blocks++;
+                       }
 
-       /* Display WA #1126: skl,bxt,kbl,glk */
-       if (level >= 1 && level <= 7) {
-               if (wp->y_tiled) {
-                       res_blocks += fixed16_to_u32_round_up(
-                                                       wp->y_tile_minimum);
-                       res_lines += wp->y_min_scanlines;
-               } else {
-                       res_blocks++;
+                       /*
+                        * Make sure result blocks for higher latency levels
+                        * are at least as high as in the level below the
+                        * current one. The DDB algorithm optimization for
+                        * special cases assumes this. Also covers Display
+                        * WA #1125 for RC.
+                        */
+                       if (result_prev->plane_res_b > res_blocks)
+                               res_blocks = result_prev->plane_res_b;
                }
-
-               /*
-                * Make sure result blocks for higher latency levels are atleast
-                * as high as level below the current level.
-                * Assumption in DDB algorithm optimization for special cases.
-                * Also covers Display WA #1125 for RC.
-                */
-               if (result_prev->plane_res_b > res_blocks)
-                       res_blocks = result_prev->plane_res_b;
        }
 
        if (INTEL_GEN(dev_priv) >= 11) {
                if (wp->y_tiled) {
-                       uint32_t extra_lines;
-                       uint_fixed_16_16_t fp_min_disp_buf_needed;
+                       int extra_lines;
 
                        if (res_lines % wp->y_min_scanlines == 0)
                                extra_lines = wp->y_min_scanlines;
                        else
                                extra_lines = wp->y_min_scanlines * 2 -
-                                             res_lines % wp->y_min_scanlines;
+                                       res_lines % wp->y_min_scanlines;
 
-                       fp_min_disp_buf_needed = mul_u32_fixed16(res_lines +
-                                               extra_lines,
-                                               wp->plane_blocks_per_line);
-                       min_disp_buf_needed = fixed16_to_u32_round_up(
-                                               fp_min_disp_buf_needed);
+                       min_ddb_alloc = mul_round_up_u32_fixed16(res_lines + extra_lines,
+                                                                wp->plane_blocks_per_line);
                } else {
-                       min_disp_buf_needed = DIV_ROUND_UP(res_blocks * 11, 10);
+                       min_ddb_alloc = res_blocks +
+                               DIV_ROUND_UP(res_blocks, 10);
                }
-       } else {
-               min_disp_buf_needed = res_blocks;
        }
 
-       if ((level > 0 && res_lines > 31) ||
-           res_blocks >= ddb_allocation ||
-           min_disp_buf_needed >= ddb_allocation) {
-               /*
-                * If there are no valid level 0 watermarks, then we can't
-                * support this display configuration.
-                */
-               if (level) {
-                       return 0;
-               } else {
-                       struct drm_plane *plane = pstate->plane;
+       if (!skl_wm_has_lines(dev_priv, level))
+               res_lines = 0;
 
-                       DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n");
-                       DRM_DEBUG_KMS("[PLANE:%d:%s] blocks required = %u/%u, lines required = %u/31\n",
-                                     plane->base.id, plane->name,
-                                     res_blocks, ddb_allocation, res_lines);
-                       return -EINVAL;
-               }
+       if (res_lines > 31) {
+               /* reject it */
+               result->min_ddb_alloc = U16_MAX;
+               return;
        }
 
-       /* The number of lines are ignored for the level 0 watermark. */
+       /*
+        * If res_lines is valid, assume we can use this watermark level
+        * for now.  We'll come back and disable it after we calculate the
+        * DDB allocation if it turns out we don't actually have enough
+        * blocks to satisfy it.
+        */
        result->plane_res_b = res_blocks;
        result->plane_res_l = res_lines;
+       /* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
+       result->min_ddb_alloc = max(min_ddb_alloc, res_blocks) + 1;
        result->plane_en = true;
-
-       return 0;
 }
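
Instead of failing with -EINVAL when a level cannot fit, the rewritten
function records the verdict in the result itself: min_ddb_alloc is the
smallest DDB allocation that makes the level usable, U16_MAX marks a level
that can never be satisfied, and the +1 turns Bspec's "value >= allocation is
invalid" rule into a plain <= test. A sketch of how a later DDB pass might
consume this (the real consumer, skl_allocate_pipe_ddb, is not in this hunk,
so the surrounding loop here is hypothetical):

    /* Sketch: prune watermark levels the final DDB allocation cannot
     * satisfy.  Field names follow this patch. */
    static void prune_wm_levels(struct skl_plane_wm *wm, u16 ddb_blocks,
                                int max_level)
    {
            int level;

            for (level = 0; level <= max_level; level++) {
                    struct skl_wm_level *l = &wm->wm[level];

                    /* min_ddb_alloc == U16_MAX means "never enough" */
                    if (l->min_ddb_alloc > ddb_blocks)
                            l->plane_en = false;
            }
    }
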
 
-static int
+static void
 skl_compute_wm_levels(const struct intel_crtc_state *cstate,
                      const struct intel_plane_state *intel_pstate,
-                     uint16_t ddb_blocks,
                      const struct skl_wm_params *wm_params,
                      struct skl_wm_level *levels)
 {
@@ -4874,45 +4810,30 @@ skl_compute_wm_levels(const struct intel_crtc_state *cstate,
                to_i915(intel_pstate->base.plane->dev);
        int level, max_level = ilk_wm_max_level(dev_priv);
        struct skl_wm_level *result_prev = &levels[0];
-       int ret;
 
        for (level = 0; level <= max_level; level++) {
                struct skl_wm_level *result = &levels[level];
 
-               ret = skl_compute_plane_wm(cstate,
-                                          intel_pstate,
-                                          ddb_blocks,
-                                          level,
-                                          wm_params,
-                                          result_prev,
-                                          result);
-               if (ret)
-                       return ret;
+               skl_compute_plane_wm(cstate, intel_pstate, level, wm_params,
+                                    result_prev, result);
 
                result_prev = result;
        }
-
-       return 0;
 }
 
-static uint32_t
+static u32
 skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
 {
        struct drm_atomic_state *state = cstate->base.state;
        struct drm_i915_private *dev_priv = to_i915(state->dev);
        uint_fixed_16_16_t linetime_us;
-       uint32_t linetime_wm;
+       u32 linetime_wm;
 
        linetime_us = intel_get_linetime_us(cstate);
-
-       if (is_fixed16_zero(linetime_us))
-               return 0;
-
        linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us));
 
-       /* Display WA #1135: bxt:ALL GLK:ALL */
-       if ((IS_BROXTON(dev_priv) || IS_GEMINILAKE(dev_priv)) &&
-           dev_priv->ipc_enabled)
+       /* Display WA #1135: BXT:ALL GLK:ALL */
+       if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
                linetime_wm /= 2;
 
        return linetime_wm;
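
As a worked example with assumed mode numbers: a 2200-pixel htotal at a
148.5 MHz dot clock gives linetime_us = 2200 / 148.5 ~= 14.81 us, so
linetime_wm = ceil(8 * 14.81) = 119; on a gen9 LP part with IPC enabled
(Display WA #1135) that is halved to 59.
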
@@ -4920,14 +4841,13 @@ skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
 
 static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
                                      const struct skl_wm_params *wp,
-                                     struct skl_plane_wm *wm,
-                                     uint16_t ddb_allocation)
+                                     struct skl_plane_wm *wm)
 {
        struct drm_device *dev = cstate->base.crtc->dev;
        const struct drm_i915_private *dev_priv = to_i915(dev);
-       uint16_t trans_min, trans_y_tile_min;
-       const uint16_t trans_amount = 10; /* This is configurable amount */
-       uint16_t wm0_sel_res_b, trans_offset_b, res_blocks;
+       u16 trans_min, trans_y_tile_min;
+       const u16 trans_amount = 10; /* This is a configurable amount */
+       u16 wm0_sel_res_b, trans_offset_b, res_blocks;
 
        /* Transition WMs are not recommended by the HW team for GEN9 */
        if (INTEL_GEN(dev_priv) <= 9)
@@ -4956,8 +4876,8 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
        wm0_sel_res_b = wm->wm[0].plane_res_b - 1;
 
        if (wp->y_tiled) {
-               trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2,
-                                                       wp->y_tile_minimum);
+               trans_y_tile_min =
+                       (u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
                res_blocks = max(wm0_sel_res_b, trans_y_tile_min) +
                                trans_offset_b;
        } else {
@@ -4969,12 +4889,13 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
 
        }
 
-       res_blocks += 1;
-
-       if (res_blocks < ddb_allocation) {
-               wm->trans_wm.plane_res_b = res_blocks;
-               wm->trans_wm.plane_en = true;
-       }
+       /*
+        * Just assume we can enable the transition watermark.  After
+        * computing the DDB we'll come back and disable it if that
+        * assumption turns out to be false.
+        */
+       wm->trans_wm.plane_res_b = res_blocks + 1;
+       wm->trans_wm.plane_en = true;
 }
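
A worked example with assumed inputs, since the enable decision no longer
depends on the DDB allocation at this point: suppose the platform code not
shown in this hunk yields trans_offset_b = 24, WM0 was computed as 36 blocks
(so wm0_sel_res_b = 35), and y_tile_minimum is 8 blocks (so
trans_y_tile_min = 2 * 8 = 16). Then res_blocks = max(35, 16) + 24 = 59 and
trans_wm.plane_res_b is programmed as 59 + 1 = 60; whether 60 blocks actually
fit is decided later, when the DDB is carved up.
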
 
 static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
@@ -4982,7 +4903,6 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
                                     enum plane_id plane_id, int color_plane)
 {
        struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
-       u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_y[plane_id]);
        struct skl_wm_params wm_params;
        int ret;
 
@@ -4991,12 +4911,8 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
        if (ret)
                return ret;
 
-       ret = skl_compute_wm_levels(crtc_state, plane_state,
-                                   ddb_blocks, &wm_params, wm->wm);
-       if (ret)
-               return ret;
-
-       skl_compute_transition_wm(crtc_state, &wm_params, wm, ddb_blocks);
+       skl_compute_wm_levels(crtc_state, plane_state, &wm_params, wm->wm);
+       skl_compute_transition_wm(crtc_state, &wm_params, wm);
 
        return 0;
 }
@@ -5006,7 +4922,6 @@ static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
                                 enum plane_id plane_id)
 {
        struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
-       u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_uv[plane_id]);
        struct skl_wm_params wm_params;
        int ret;
 
@@ -5018,10 +4933,7 @@ static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
        if (ret)
                return ret;
 
-       ret = skl_compute_wm_levels(crtc_state, plane_state,
-                                   ddb_blocks, &wm_params, wm->uv_wm);
-       if (ret)
-               return ret;
+       skl_compute_wm_levels(crtc_state, plane_state, &wm_params, wm->uv_wm);
 
        return 0;
 }
@@ -5139,7 +5051,7 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv,
                               i915_reg_t reg,
                               const struct skl_wm_level *level)
 {
-       uint32_t val = 0;
+       u32 val = 0;
 
        if (level->plane_en) {
                val |= PLANE_WM_EN;
@@ -5230,6 +5142,23 @@ static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
        return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm);
 }
 
+static bool skl_pipe_wm_equals(struct intel_crtc *crtc,
+                              const struct skl_pipe_wm *wm1,
+                              const struct skl_pipe_wm *wm2)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum plane_id plane_id;
+
+       for_each_plane_id_on_crtc(crtc, plane_id) {
+               if (!skl_plane_wm_equals(dev_priv,
+                                        &wm1->planes[plane_id],
+                                        &wm2->planes[plane_id]))
+                       return false;
+       }
+
+       return wm1->linetime == wm2->linetime;
+}
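
skl_update_pipe_wm() below switches from memcmp() over the whole skl_pipe_wm
to this field-wise comparison. One classic reason to prefer that: memcmp()
also compares struct padding and entries for planes that don't exist on the
CRTC, either of which can hold stale bytes. A self-contained illustration of
the padding problem:

    /* Illustration only: logically equal structs can memcmp() unequal. */
    struct wm_like {
            u8 enabled;     /* padding bytes typically follow */
            u32 blocks;
    };

    void example(void)
    {
            struct wm_like a, b;

            memset(&a, 0xaa, sizeof(a)); /* dirty a's padding */
            memset(&b, 0x00, sizeof(b));

            a.enabled = b.enabled = 1;   /* member stores leave the */
            a.blocks = b.blocks = 42;    /* padding bytes untouched */

            /* memcmp(&a, &b, sizeof(a)) != 0 although the fields match */
    }
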
+
 static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
                                           const struct skl_ddb_entry *b)
 {
@@ -5251,35 +5180,32 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
        return false;
 }
 
-static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
+static int skl_update_pipe_wm(struct intel_crtc_state *cstate,
                              const struct skl_pipe_wm *old_pipe_wm,
                              struct skl_pipe_wm *pipe_wm, /* out */
                              bool *changed /* out */)
 {
-       struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
+       struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
        int ret;
 
-       ret = skl_build_pipe_wm(intel_cstate, pipe_wm);
+       ret = skl_build_pipe_wm(cstate, pipe_wm);
        if (ret)
                return ret;
 
-       if (!memcmp(old_pipe_wm, pipe_wm, sizeof(*pipe_wm)))
-               *changed = false;
-       else
-               *changed = true;
+       *changed = !skl_pipe_wm_equals(crtc, old_pipe_wm, pipe_wm);
 
        return 0;
 }
 
-static uint32_t
-pipes_modified(struct drm_atomic_state *state)
+static u32
+pipes_modified(struct intel_atomic_state *state)
 {
-       struct drm_crtc *crtc;
-       struct drm_crtc_state *cstate;
-       uint32_t i, ret = 0;
+       struct intel_crtc *crtc;
+       struct intel_crtc_state *cstate;
+       u32 i, ret = 0;
 
-       for_each_new_crtc_in_state(state, crtc, cstate, i)
-               ret |= drm_crtc_mask(crtc);
+       for_each_new_intel_crtc_in_state(state, crtc, cstate, i)
+               ret |= drm_crtc_mask(&crtc->base);
 
        return ret;
 }
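
drm_crtc_mask() is the drm core helper that turns a CRTC into a single bit of
a u32 (roughly "1 << drm_crtc_index(crtc)" in this era), so the mask built
here can describe up to 32 pipes:

    /* Sketch of the drm core helper this loop relies on. */
    static inline u32 crtc_bit(const struct drm_crtc *crtc)
    {
            return 1 << drm_crtc_index(crtc);
    }
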
@@ -5314,11 +5240,10 @@ skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
 }
 
 static int
-skl_compute_ddb(struct drm_atomic_state *state)
+skl_compute_ddb(struct intel_atomic_state *state)
 {
-       const struct drm_i915_private *dev_priv = to_i915(state->dev);
-       struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-       struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
+       const struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+       struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
        struct intel_crtc_state *old_crtc_state;
        struct intel_crtc_state *new_crtc_state;
        struct intel_crtc *crtc;
@@ -5326,7 +5251,7 @@ skl_compute_ddb(struct drm_atomic_state *state)
 
        memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
 
-       for_each_oldnew_intel_crtc_in_state(intel_state, crtc, old_crtc_state,
+       for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                ret = skl_allocate_pipe_ddb(new_crtc_state, ddb);
                if (ret)
@@ -5372,15 +5297,13 @@ skl_print_wm_changes(struct intel_atomic_state *state)
 }
 
 static int
-skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
+skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed)
 {
-       struct drm_device *dev = state->dev;
+       struct drm_device *dev = state->base.dev;
        const struct drm_i915_private *dev_priv = to_i915(dev);
-       const struct drm_crtc *crtc;
-       const struct drm_crtc_state *cstate;
-       struct intel_crtc *intel_crtc;
-       struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-       uint32_t realloc_pipes = pipes_modified(state);
+       struct intel_crtc *crtc;
+       struct intel_crtc_state *crtc_state;
+       u32 realloc_pipes = pipes_modified(state);
        int ret, i;
 
        /*
@@ -5398,7 +5321,7 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
         * since any racing commits that want to update them would need to
         * hold _all_ CRTC state mutexes.
         */
-       for_each_new_crtc_in_state(state, crtc, cstate, i)
+       for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
                (*changed) = true;
 
        if (!*changed)
@@ -5412,20 +5335,20 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
         */
        if (dev_priv->wm.distrust_bios_wm) {
                ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
-                                      state->acquire_ctx);
+                                      state->base.acquire_ctx);
                if (ret)
                        return ret;
 
-               intel_state->active_pipe_changes = ~0;
+               state->active_pipe_changes = ~0;
 
                /*
-                * We usually only initialize intel_state->active_crtcs if we
+                * We usually only initialize state->active_crtcs if
                 * we're doing a modeset; make sure this field is always
                 * initialized during the sanitization process that happens
                 * on the first commit too.
                 */
-               if (!intel_state->modeset)
-                       intel_state->active_crtcs = dev_priv->active_crtcs;
+               if (!state->modeset)
+                       state->active_crtcs = dev_priv->active_crtcs;
        }
 
        /*
@@ -5441,21 +5364,19 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
         * any other display updates race with this transaction, so we need
         * to grab the lock on *all* CRTC's.
         */
-       if (intel_state->active_pipe_changes || intel_state->modeset) {
+       if (state->active_pipe_changes || state->modeset) {
                realloc_pipes = ~0;
-               intel_state->wm_results.dirty_pipes = ~0;
+               state->wm_results.dirty_pipes = ~0;
        }
 
        /*
         * We're not recomputing for the pipes not included in the commit, so
         * make sure we start with the current state.
         */
-       for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
-               struct intel_crtc_state *cstate;
-
-               cstate = intel_atomic_get_crtc_state(state, intel_crtc);
-               if (IS_ERR(cstate))
-                       return PTR_ERR(cstate);
+       for_each_intel_crtc_mask(dev, crtc, realloc_pipes) {
+               crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+               if (IS_ERR(crtc_state))
+                       return PTR_ERR(crtc_state);
        }
 
        return 0;
@@ -5522,12 +5443,12 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
 }
 
 static int
-skl_compute_wm(struct drm_atomic_state *state)
+skl_compute_wm(struct intel_atomic_state *state)
 {
-       struct drm_crtc *crtc;
-       struct drm_crtc_state *cstate;
-       struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-       struct skl_ddb_values *results = &intel_state->wm_results;
+       struct intel_crtc *crtc;
+       struct intel_crtc_state *cstate;
+       struct intel_crtc_state *old_crtc_state;
+       struct skl_ddb_values *results = &state->wm_results;
        struct skl_pipe_wm *pipe_wm;
        bool changed = false;
        int ret, i;
@@ -5539,47 +5460,35 @@ skl_compute_wm(struct drm_atomic_state *state)
        if (ret || !changed)
                return ret;
 
-       ret = skl_compute_ddb(state);
-       if (ret)
-               return ret;
-
        /*
         * Calculate WM's for all pipes that are part of this transaction.
-        * Note that the DDB allocation above may have added more CRTC's that
+        * Note that skl_ddb_add_affected_pipes may have added more CRTCs that
         * weren't otherwise being modified (and set bits in dirty_pipes) if
         * pipe allocations had to change.
-        *
-        * FIXME:  Now that we're doing this in the atomic check phase, we
-        * should allow skl_update_pipe_wm() to return failure in cases where
-        * no suitable watermark values can be found.
         */
-       for_each_new_crtc_in_state(state, crtc, cstate, i) {
-               struct intel_crtc_state *intel_cstate =
-                       to_intel_crtc_state(cstate);
+       for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+                                           cstate, i) {
                const struct skl_pipe_wm *old_pipe_wm =
-                       &to_intel_crtc_state(crtc->state)->wm.skl.optimal;
+                       &old_crtc_state->wm.skl.optimal;
 
-               pipe_wm = &intel_cstate->wm.skl.optimal;
+               pipe_wm = &cstate->wm.skl.optimal;
                ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm, &changed);
                if (ret)
                        return ret;
 
-               ret = skl_wm_add_affected_planes(intel_state,
-                                                to_intel_crtc(crtc));
+               ret = skl_wm_add_affected_planes(state, crtc);
                if (ret)
                        return ret;
 
                if (changed)
-                       results->dirty_pipes |= drm_crtc_mask(crtc);
-
-               if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
-                       /* This pipe's WM's did not change */
-                       continue;
-
-               intel_cstate->update_wm_pre = true;
+                       results->dirty_pipes |= drm_crtc_mask(&crtc->base);
        }
 
-       skl_print_wm_changes(intel_state);
+       ret = skl_compute_ddb(state);
+       if (ret)
+               return ret;
+
+       skl_print_wm_changes(state);
 
        return 0;
 }
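
The ordering change buried in this hunk is the point of the rework: watermark
levels for every affected pipe are now computed before skl_compute_ddb()
runs, so the DDB pass can consult each level's min_ddb_alloc instead of the
old compute-DDB-first, return-EINVAL-later scheme. A condensed sketch of the
resulting two-pass shape (the iterator name is hypothetical, error handling
trimmed):

    static int skl_compute_wm_sketch(struct intel_atomic_state *state)
    {
            /* Pass 1: compute WM levels optimistically; each level
             * records the DDB space it would need (min_ddb_alloc). */
            for_each_affected_crtc(state)
                    skl_update_pipe_wm(...);

            /* Pass 2: carve up the DDB, then disable any level whose
             * min_ddb_alloc exceeds what its plane actually received. */
            return skl_compute_ddb(state);
    }
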
@@ -5617,13 +5526,13 @@ static void skl_initial_wm(struct intel_atomic_state *state,
        mutex_unlock(&dev_priv->wm.wm_mutex);
 }
 
-static void ilk_compute_wm_config(struct drm_device *dev,
+static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
                                  struct intel_wm_config *config)
 {
        struct intel_crtc *crtc;
 
        /* Compute the currently _active_ config */
-       for_each_intel_crtc(dev, crtc) {
+       for_each_intel_crtc(&dev_priv->drm, crtc) {
                const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
 
                if (!wm->pipe_enabled)
@@ -5637,25 +5546,24 @@ static void ilk_compute_wm_config(struct drm_device *dev,
 
 static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = &dev_priv->drm;
        struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
        struct ilk_wm_maximums max;
        struct intel_wm_config config = {};
        struct ilk_wm_values results = {};
        enum intel_ddb_partitioning partitioning;
 
-       ilk_compute_wm_config(dev, &config);
+       ilk_compute_wm_config(dev_priv, &config);
 
-       ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
-       ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
+       ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
+       ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);
 
        /* 5/6 split only in single pipe config on IVB+ */
        if (INTEL_GEN(dev_priv) >= 7 &&
            config.num_pipes_active == 1 && config.sprites_enabled) {
-               ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
-               ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
+               ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
+               ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);
 
-               best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
+               best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
        } else {
                best_lp_wm = &lp_wm_1_2;
        }
@@ -5663,7 +5571,7 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
        partitioning = (best_lp_wm == &lp_wm_1_2) ?
                       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
 
-       ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
+       ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);
 
        ilk_write_wm_values(dev_priv, &results);
 }
@@ -5694,7 +5602,7 @@ static void ilk_optimize_watermarks(struct intel_atomic_state *state,
        mutex_unlock(&dev_priv->wm.wm_mutex);
 }
 
-static inline void skl_wm_level_from_reg_val(uint32_t val,
+static inline void skl_wm_level_from_reg_val(u32 val,
                                             struct skl_wm_level *level)
 {
        level->plane_en = val & PLANE_WM_EN;
@@ -5703,19 +5611,18 @@ static inline void skl_wm_level_from_reg_val(uint32_t val,
                PLANE_WM_LINES_MASK;
 }
 
-void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
+void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
                              struct skl_pipe_wm *out)
 {
-       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       enum pipe pipe = intel_crtc->pipe;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
        int level, max_level;
        enum plane_id plane_id;
-       uint32_t val;
+       u32 val;
 
        max_level = ilk_wm_max_level(dev_priv);
 
-       for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+       for_each_plane_id_on_crtc(crtc, plane_id) {
                struct skl_plane_wm *wm = &out->planes[plane_id];
 
                for (level = 0; level <= max_level; level++) {
@@ -5735,30 +5642,27 @@ void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
                skl_wm_level_from_reg_val(val, &wm->trans_wm);
        }
 
-       if (!intel_crtc->active)
+       if (!crtc->active)
                return;
 
        out->linetime = I915_READ(PIPE_WM_LINETIME(pipe));
 }
 
-void skl_wm_get_hw_state(struct drm_device *dev)
+void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct skl_ddb_values *hw = &dev_priv->wm.skl_hw;
        struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
-       struct drm_crtc *crtc;
-       struct intel_crtc *intel_crtc;
+       struct intel_crtc *crtc;
        struct intel_crtc_state *cstate;
 
        skl_ddb_get_hw_state(dev_priv, ddb);
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               intel_crtc = to_intel_crtc(crtc);
-               cstate = to_intel_crtc_state(crtc->state);
+       for_each_intel_crtc(&dev_priv->drm, crtc) {
+               cstate = to_intel_crtc_state(crtc->base.state);
 
                skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal);
 
-               if (intel_crtc->active)
-                       hw->dirty_pipes |= drm_crtc_mask(crtc);
+               if (crtc->active)
+                       hw->dirty_pipes |= drm_crtc_mask(&crtc->base);
        }
 
        if (dev_priv->active_crtcs) {
@@ -5767,15 +5671,14 @@ void skl_wm_get_hw_state(struct drm_device *dev)
        }
 }
 
-static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
+static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
 {
-       struct drm_device *dev = crtc->dev;
+       struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct ilk_wm_values *hw = &dev_priv->wm.hw;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
+       struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->base.state);
        struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
-       enum pipe pipe = intel_crtc->pipe;
+       enum pipe pipe = crtc->pipe;
        static const i915_reg_t wm0_pipe_reg[] = {
                [PIPE_A] = WM0_PIPEA_ILK,
                [PIPE_B] = WM0_PIPEB_ILK,
@@ -5788,7 +5691,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 
        memset(active, 0, sizeof(*active));
 
-       active->pipe_enabled = intel_crtc->active;
+       active->pipe_enabled = crtc->active;
 
        if (active->pipe_enabled) {
                u32 tmp = hw->wm_pipe[pipe];
@@ -5816,7 +5719,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
                        active->wm[level].enable = true;
        }
 
-       intel_crtc->wm.active.ilk = *active;
+       crtc->wm.active.ilk = *active;
 }
 
 #define _FW_WM(value, plane) \
@@ -5827,7 +5730,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
                               struct g4x_wm_values *wm)
 {
-       uint32_t tmp;
+       u32 tmp;
 
        tmp = I915_READ(DSPFW1);
        wm->sr.plane = _FW_WM(tmp, SR);
@@ -5854,7 +5757,7 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
                               struct vlv_wm_values *wm)
 {
        enum pipe pipe;
-       uint32_t tmp;
+       u32 tmp;
 
        for_each_pipe(dev_priv, pipe) {
                tmp = I915_READ(VLV_DDL(pipe));
@@ -5926,9 +5829,8 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
 #undef _FW_WM
 #undef _FW_WM_VLV
 
-void g4x_wm_get_hw_state(struct drm_device *dev)
+void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct g4x_wm_values *wm = &dev_priv->wm.g4x;
        struct intel_crtc *crtc;
 
@@ -5936,7 +5838,7 @@ void g4x_wm_get_hw_state(struct drm_device *dev)
 
        wm->cxsr = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
 
-       for_each_intel_crtc(dev, crtc) {
+       for_each_intel_crtc(&dev_priv->drm, crtc) {
                struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);
                struct g4x_wm_state *active = &crtc->wm.active.g4x;
@@ -6067,9 +5969,8 @@ void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
        mutex_unlock(&dev_priv->wm.wm_mutex);
 }
 
-void vlv_wm_get_hw_state(struct drm_device *dev)
+void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct vlv_wm_values *wm = &dev_priv->wm.vlv;
        struct intel_crtc *crtc;
        u32 val;
@@ -6113,7 +6014,7 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
                mutex_unlock(&dev_priv->pcu_lock);
        }
 
-       for_each_intel_crtc(dev, crtc) {
+       for_each_intel_crtc(&dev_priv->drm, crtc) {
                struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);
                struct vlv_wm_state *active = &crtc->wm.active.vlv;
@@ -6230,15 +6131,14 @@ static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
         */
 }
 
-void ilk_wm_get_hw_state(struct drm_device *dev)
+void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct ilk_wm_values *hw = &dev_priv->wm.hw;
-       struct drm_crtc *crtc;
+       struct intel_crtc *crtc;
 
        ilk_init_lp_watermarks(dev_priv);
 
-       for_each_crtc(dev, crtc)
+       for_each_intel_crtc(&dev_priv->drm, crtc)
                ilk_pipe_wm_get_hw_state(crtc);
 
        hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
@@ -6339,10 +6239,6 @@ void intel_init_ipc(struct drm_i915_private *dev_priv)
  */
 DEFINE_SPINLOCK(mchdev_lock);
 
-/* Global for IPS driver to get at the current i915 device. Protected by
- * mchdev_lock. */
-static struct drm_i915_private *i915_mch_dev;
-
 bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
 {
        u16 rgvswctl;
@@ -6805,7 +6701,7 @@ void gen6_rps_boost(struct i915_request *rq,
        if (!rps->enabled)
                return;
 
-       if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+       if (i915_request_signaled(rq))
                return;
 
        /* Serializes with i915_request_retire() */
@@ -7049,7 +6945,7 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv)
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
        /* Program defaults and thresholds for RPS */
-       if (IS_GEN9(dev_priv))
+       if (IS_GEN(dev_priv, 9))
                I915_WRITE(GEN6_RC_VIDEO_FREQ,
                        GEN9_FREQUENCY(dev_priv->gt_pm.rps.rp1_freq));
 
@@ -7285,9 +7181,9 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
 
        rc6vids = 0;
        ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
-       if (IS_GEN6(dev_priv) && ret) {
+       if (IS_GEN(dev_priv, 6) && ret) {
                DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
-       } else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
+       } else if (IS_GEN(dev_priv, 6) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
                DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
                          GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
                rc6vids &= 0xffff00;
@@ -7412,7 +7308,7 @@ static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
 
        val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
 
-       switch (INTEL_INFO(dev_priv)->sseu.eu_total) {
+       switch (RUNTIME_INFO(dev_priv)->sseu.eu_total) {
        case 8:
                /* (2 * 4) config */
                rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
@@ -7985,16 +7881,17 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
 
 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
 {
-       unsigned long val;
+       intel_wakeref_t wakeref;
+       unsigned long val = 0;
 
-       if (!IS_GEN5(dev_priv))
+       if (!IS_GEN(dev_priv, 5))
                return 0;
 
-       spin_lock_irq(&mchdev_lock);
-
-       val = __i915_chipset_val(dev_priv);
-
-       spin_unlock_irq(&mchdev_lock);
+       with_intel_runtime_pm(dev_priv, wakeref) {
+               spin_lock_irq(&mchdev_lock);
+               val = __i915_chipset_val(dev_priv);
+               spin_unlock_irq(&mchdev_lock);
+       }
 
        return val;
 }
@@ -8071,14 +7968,16 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
 
 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
 {
-       if (!IS_GEN5(dev_priv))
-               return;
-
-       spin_lock_irq(&mchdev_lock);
+       intel_wakeref_t wakeref;
 
-       __i915_update_gfx_val(dev_priv);
+       if (!IS_GEN(dev_priv, 5))
+               return;
 
-       spin_unlock_irq(&mchdev_lock);
+       with_intel_runtime_pm(dev_priv, wakeref) {
+               spin_lock_irq(&mchdev_lock);
+               __i915_update_gfx_val(dev_priv);
+               spin_unlock_irq(&mchdev_lock);
+       }
 }
 
 static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
@@ -8120,18 +8019,34 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
 
 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
 {
-       unsigned long val;
+       intel_wakeref_t wakeref;
+       unsigned long val = 0;
 
-       if (!IS_GEN5(dev_priv))
+       if (!IS_GEN(dev_priv, 5))
                return 0;
 
-       spin_lock_irq(&mchdev_lock);
+       with_intel_runtime_pm(dev_priv, wakeref) {
+               spin_lock_irq(&mchdev_lock);
+               val = __i915_gfx_val(dev_priv);
+               spin_unlock_irq(&mchdev_lock);
+       }
 
-       val = __i915_gfx_val(dev_priv);
+       return val;
+}
 
-       spin_unlock_irq(&mchdev_lock);
+static struct drm_i915_private *i915_mch_dev;
 
-       return val;
+static struct drm_i915_private *mchdev_get(void)
+{
+       struct drm_i915_private *i915;
+
+       rcu_read_lock();
+       i915 = i915_mch_dev;
+       if (!kref_get_unless_zero(&i915->drm.ref))
+               i915 = NULL;
+       rcu_read_unlock();
+
+       return i915;
 }
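
mchdev_get() replaces "hold mchdev_lock around every use of i915_mch_dev"
with an RCU-published pointer plus a try-get on the device's reference count:
teardown only clears the pointer, and kref_get_unless_zero() refuses a device
already on its way to zero. A generic sketch of the pattern (not i915 code);
note that it checks the dereferenced pointer for NULL before touching the
embedded kref, which matters whenever a lookup can race with the window
before the pointer is first published:

    struct obj {
            struct kref ref;
            /* ... */
    };

    static struct obj __rcu *global_obj;

    static struct obj *obj_lookup(void)
    {
            struct obj *o;

            rcu_read_lock();
            o = rcu_dereference(global_obj);
            if (o && !kref_get_unless_zero(&o->ref))
                    o = NULL;       /* raced with the final put */
            rcu_read_unlock();

            return o;       /* caller must put the reference when done */
    }
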
 
 /**
@@ -8142,23 +8057,24 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
  */
 unsigned long i915_read_mch_val(void)
 {
-       struct drm_i915_private *dev_priv;
-       unsigned long chipset_val, graphics_val, ret = 0;
-
-       spin_lock_irq(&mchdev_lock);
-       if (!i915_mch_dev)
-               goto out_unlock;
-       dev_priv = i915_mch_dev;
+       struct drm_i915_private *i915;
+       unsigned long chipset_val = 0;
+       unsigned long graphics_val = 0;
+       intel_wakeref_t wakeref;
 
-       chipset_val = __i915_chipset_val(dev_priv);
-       graphics_val = __i915_gfx_val(dev_priv);
-
-       ret = chipset_val + graphics_val;
+       i915 = mchdev_get();
+       if (!i915)
+               return 0;
 
-out_unlock:
-       spin_unlock_irq(&mchdev_lock);
+       with_intel_runtime_pm(i915, wakeref) {
+               spin_lock_irq(&mchdev_lock);
+               chipset_val = __i915_chipset_val(i915);
+               graphics_val = __i915_gfx_val(i915);
+               spin_unlock_irq(&mchdev_lock);
+       }
 
-       return ret;
+       drm_dev_put(&i915->drm);
+       return chipset_val + graphics_val;
 }
 EXPORT_SYMBOL_GPL(i915_read_mch_val);
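
i915_chipset_val(), i915_gfx_val() and i915_read_mch_val() now take a
runtime-PM wakeref for exactly the duration of the locked section via
with_intel_runtime_pm(). Macros like this are a one-iteration for loop; a
generic sketch with hypothetical resource_get()/resource_put() helpers:

    /* Runs the body once while the resource is held; the put sits in the
     * loop's increment clause, so the body must fall through - a break,
     * goto or return out of it would skip the release. */
    #define with_resource(res, cookie) \
            for ((cookie) = resource_get(res); (cookie); \
                 resource_put((res), (cookie)), (cookie) = 0)
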
 
@@ -8169,23 +8085,19 @@ EXPORT_SYMBOL_GPL(i915_read_mch_val);
  */
 bool i915_gpu_raise(void)
 {
-       struct drm_i915_private *dev_priv;
-       bool ret = true;
+       struct drm_i915_private *i915;
 
-       spin_lock_irq(&mchdev_lock);
-       if (!i915_mch_dev) {
-               ret = false;
-               goto out_unlock;
-       }
-       dev_priv = i915_mch_dev;
-
-       if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
-               dev_priv->ips.max_delay--;
+       i915 = mchdev_get();
+       if (!i915)
+               return false;
 
-out_unlock:
+       spin_lock_irq(&mchdev_lock);
+       if (i915->ips.max_delay > i915->ips.fmax)
+               i915->ips.max_delay--;
        spin_unlock_irq(&mchdev_lock);
 
-       return ret;
+       drm_dev_put(&i915->drm);
+       return true;
 }
 EXPORT_SYMBOL_GPL(i915_gpu_raise);
 
@@ -8197,23 +8109,19 @@ EXPORT_SYMBOL_GPL(i915_gpu_raise);
  */
 bool i915_gpu_lower(void)
 {
-       struct drm_i915_private *dev_priv;
-       bool ret = true;
-
-       spin_lock_irq(&mchdev_lock);
-       if (!i915_mch_dev) {
-               ret = false;
-               goto out_unlock;
-       }
-       dev_priv = i915_mch_dev;
+       struct drm_i915_private *i915;
 
-       if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
-               dev_priv->ips.max_delay++;
+       i915 = mchdev_get();
+       if (!i915)
+               return false;
 
-out_unlock:
+       spin_lock_irq(&mchdev_lock);
+       if (i915->ips.max_delay < i915->ips.min_delay)
+               i915->ips.max_delay++;
        spin_unlock_irq(&mchdev_lock);
 
-       return ret;
+       drm_dev_put(&i915->drm);
+       return true;
 }
 EXPORT_SYMBOL_GPL(i915_gpu_lower);
 
@@ -8224,13 +8132,16 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower);
  */
 bool i915_gpu_busy(void)
 {
-       bool ret = false;
+       struct drm_i915_private *i915;
+       bool ret;
 
-       spin_lock_irq(&mchdev_lock);
-       if (i915_mch_dev)
-               ret = i915_mch_dev->gt.awake;
-       spin_unlock_irq(&mchdev_lock);
+       i915 = mchdev_get();
+       if (!i915)
+               return false;
+
+       ret = i915->gt.awake;
 
+       drm_dev_put(&i915->drm);
        return ret;
 }
 EXPORT_SYMBOL_GPL(i915_gpu_busy);
@@ -8243,24 +8154,19 @@ EXPORT_SYMBOL_GPL(i915_gpu_busy);
  */
 bool i915_gpu_turbo_disable(void)
 {
-       struct drm_i915_private *dev_priv;
-       bool ret = true;
-
-       spin_lock_irq(&mchdev_lock);
-       if (!i915_mch_dev) {
-               ret = false;
-               goto out_unlock;
-       }
-       dev_priv = i915_mch_dev;
-
-       dev_priv->ips.max_delay = dev_priv->ips.fstart;
+       struct drm_i915_private *i915;
+       bool ret;
 
-       if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
-               ret = false;
+       i915 = mchdev_get();
+       if (!i915)
+               return false;
 
-out_unlock:
+       spin_lock_irq(&mchdev_lock);
+       i915->ips.max_delay = i915->ips.fstart;
+       ret = ironlake_set_drps(i915, i915->ips.fstart);
        spin_unlock_irq(&mchdev_lock);
 
+       drm_dev_put(&i915->drm);
        return ret;
 }
 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
@@ -8289,18 +8195,14 @@ void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
 {
        /* We only register the i915 ips part with intel-ips once everything is
         * set up, to avoid intel-ips sneaking in and reading bogus values. */
-       spin_lock_irq(&mchdev_lock);
-       i915_mch_dev = dev_priv;
-       spin_unlock_irq(&mchdev_lock);
+       rcu_assign_pointer(i915_mch_dev, dev_priv);
 
        ips_ping_for_i915_load();
 }
 
 void intel_gpu_ips_teardown(void)
 {
-       spin_lock_irq(&mchdev_lock);
-       i915_mch_dev = NULL;
-       spin_unlock_irq(&mchdev_lock);
+       rcu_assign_pointer(i915_mch_dev, NULL);
 }
 
 static void intel_init_emon(struct drm_i915_private *dev_priv)
@@ -8410,7 +8312,7 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
                              intel_freq_opcode(dev_priv, 450));
 
        /* After setting max-softlimit, find the overclock max freq */
-       if (IS_GEN6(dev_priv) ||
+       if (IS_GEN(dev_priv, 6) ||
            IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
                u32 params = 0;
 
@@ -8639,7 +8541,7 @@ static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
 
 static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-       uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
+       u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
 
        /*
         * Required for FBC
@@ -8711,7 +8613,7 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
 static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
 {
        int pipe;
-       uint32_t val;
+       u32 val;
 
        /*
         * On Ibex Peak and Cougar Point, we need to disable clock
@@ -8746,7 +8648,7 @@ static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
 
 static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
 {
-       uint32_t tmp;
+       u32 tmp;
 
        tmp = I915_READ(MCH_SSKPD);
        if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
@@ -8756,7 +8658,7 @@ static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
 
 static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-       uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
+       u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
 
        I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
 
@@ -8850,7 +8752,7 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
 
 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
 {
-       uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
+       u32 reg = I915_READ(GEN7_FF_THREAD_MODE);
 
        /*
         * WaVSThreadDispatchOverride:ivb,vlv
@@ -8886,7 +8788,7 @@ static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
 static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
 {
        if (HAS_PCH_LPT_LP(dev_priv)) {
-               uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
+               u32 val = I915_READ(SOUTH_DSPCLK_GATE_D);
 
                val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
                I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
@@ -9124,7 +9026,7 @@ static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
 
 static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-       uint32_t snpcr;
+       u32 snpcr;
 
        I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
 
@@ -9333,7 +9235,7 @@ static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
 
 static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-       uint32_t dspclk_gate;
+       u32 dspclk_gate;
 
        I915_WRITE(RENCLK_GATE_D1, 0);
        I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
@@ -9480,9 +9382,9 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
                dev_priv->display.init_clock_gating = ivb_init_clock_gating;
        else if (IS_VALLEYVIEW(dev_priv))
                dev_priv->display.init_clock_gating = vlv_init_clock_gating;
-       else if (IS_GEN6(dev_priv))
+       else if (IS_GEN(dev_priv, 6))
                dev_priv->display.init_clock_gating = gen6_init_clock_gating;
-       else if (IS_GEN5(dev_priv))
+       else if (IS_GEN(dev_priv, 5))
                dev_priv->display.init_clock_gating = ilk_init_clock_gating;
        else if (IS_G4X(dev_priv))
                dev_priv->display.init_clock_gating = g4x_init_clock_gating;
@@ -9490,11 +9392,11 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
                dev_priv->display.init_clock_gating = i965gm_init_clock_gating;
        else if (IS_I965G(dev_priv))
                dev_priv->display.init_clock_gating = i965g_init_clock_gating;
-       else if (IS_GEN3(dev_priv))
+       else if (IS_GEN(dev_priv, 3))
                dev_priv->display.init_clock_gating = gen3_init_clock_gating;
        else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
                dev_priv->display.init_clock_gating = i85x_init_clock_gating;
-       else if (IS_GEN2(dev_priv))
+       else if (IS_GEN(dev_priv, 2))
                dev_priv->display.init_clock_gating = i830_init_clock_gating;
        else {
                MISSING_CASE(INTEL_DEVID(dev_priv));
@@ -9508,7 +9410,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
        /* For cxsr */
        if (IS_PINEVIEW(dev_priv))
                i915_pineview_get_mem_freq(dev_priv);
-       else if (IS_GEN5(dev_priv))
+       else if (IS_GEN(dev_priv, 5))
                i915_ironlake_get_mem_freq(dev_priv);
 
        /* For FIFO watermark updates */
@@ -9520,9 +9422,9 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
        } else if (HAS_PCH_SPLIT(dev_priv)) {
                ilk_setup_wm_latency(dev_priv);
 
-               if ((IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[1] &&
+               if ((IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[1] &&
                     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
-                   (!IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[0] &&
+                   (!IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[0] &&
                     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
                        dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
                        dev_priv->display.compute_intermediate_wm =
@@ -9563,12 +9465,12 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
                        dev_priv->display.update_wm = NULL;
                } else
                        dev_priv->display.update_wm = pineview_update_wm;
-       } else if (IS_GEN4(dev_priv)) {
+       } else if (IS_GEN(dev_priv, 4)) {
                dev_priv->display.update_wm = i965_update_wm;
-       } else if (IS_GEN3(dev_priv)) {
+       } else if (IS_GEN(dev_priv, 3)) {
                dev_priv->display.update_wm = i9xx_update_wm;
                dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
-       } else if (IS_GEN2(dev_priv)) {
+       } else if (IS_GEN(dev_priv, 2)) {
                if (INTEL_INFO(dev_priv)->num_pipes == 1) {
                        dev_priv->display.update_wm = i845_update_wm;
                        dev_priv->display.get_fifo_size = i845_get_fifo_size;
@@ -9583,7 +9485,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
 
 static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
 {
-       uint32_t flags =
+       u32 flags =
                I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
 
        switch (flags) {
@@ -9606,7 +9508,7 @@ static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
 
 static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
 {
-       uint32_t flags =
+       u32 flags =
                I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
 
        switch (flags) {
index 419e563425239951bdaa43bc05dae5c83c5484d5..84a0fb98156135839b5bc966dfcd09ea81f98556 100644 (file)
@@ -51,7 +51,6 @@
  * must be correctly synchronized/cancelled when shutting down the pipe."
  */
 
-#include <drm/drmP.h>
 
 #include "intel_drv.h"
 #include "i915_drv.h"
@@ -71,17 +70,17 @@ static bool psr_global_enabled(u32 debug)
 static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
                               const struct intel_crtc_state *crtc_state)
 {
-       /* Disable PSR2 by default for all platforms */
-       if (i915_modparams.enable_psr == -1)
-               return false;
-
        /* Cannot enable DSC and PSR2 simultaneously */
        WARN_ON(crtc_state->dsc_params.compression_enable &&
                crtc_state->has_psr2);
 
        switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
+       case I915_PSR_DEBUG_DISABLE:
        case I915_PSR_DEBUG_FORCE_PSR1:
                return false;
+       case I915_PSR_DEBUG_DEFAULT:
+               if (i915_modparams.enable_psr <= 0)
+                       return false;
        default:
                return crtc_state->has_psr2;
        }
@@ -231,7 +230,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
 
 static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
 {
-       uint8_t dprx = 0;
+       u8 dprx = 0;
 
        if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
                              &dprx) != 1)
@@ -241,7 +240,7 @@ static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
 
 static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
 {
-       uint8_t alpm_caps = 0;
+       u8 alpm_caps = 0;
 
        if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
                              &alpm_caps) != 1)
@@ -261,6 +260,32 @@ static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
        return val;
 }
 
+static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp)
+{
+       u16 val;
+       ssize_t r;
+
+       /*
+        * Return the default X granularity if granularity is not required
+        * or if the DPCD read fails.
+        */
+       if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED))
+               return 4;
+
+       r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
+       if (r != 2)
+               DRM_DEBUG_KMS("Unable to read DP_PSR2_SU_X_GRANULARITY\n");
+
+       /*
+        * Spec says that if the value read is 0 the default granularity should
+        * be used instead.
+        */
+       if (r != 2 || val == 0)
+               val = 4;
+
+       return val;
+}
+
 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv =
@@ -274,10 +299,16 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
        DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
                      intel_dp->psr_dpcd[0]);
 
+       if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
+               DRM_DEBUG_KMS("PSR support not currently available for this panel\n");
+               return;
+       }
+
        if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
                DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
                return;
        }
+
        dev_priv->psr.sink_support = true;
        dev_priv->psr.sink_sync_latency =
                intel_dp_get_sink_sync_latency(intel_dp);
@@ -309,6 +340,8 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
                if (dev_priv->psr.sink_psr2_support) {
                        dev_priv->psr.colorimetry_support =
                                intel_dp_get_colorimetry_status(intel_dp);
+                       dev_priv->psr.su_x_granularity =
+                               intel_dp_get_su_x_granulartiy(intel_dp);
                }
        }
 }
@@ -351,7 +384,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        u32 aux_clock_divider, aux_ctl;
        int i;
-       static const uint8_t aux_msg[] = {
+       static const u8 aux_msg[] = {
                [0] = DP_AUX_NATIVE_WRITE << 4,
                [1] = DP_SET_POWER >> 8,
                [2] = DP_SET_POWER & 0xff,
@@ -388,13 +421,15 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
        if (dev_priv->psr.psr2_enabled) {
                drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
                                   DP_ALPM_ENABLE);
-               dpcd_val |= DP_PSR_ENABLE_PSR2;
+               dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
+       } else {
+               if (dev_priv->psr.link_standby)
+                       dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
+
+               if (INTEL_GEN(dev_priv) >= 8)
+                       dpcd_val |= DP_PSR_CRC_VERIFICATION;
        }
 
-       if (dev_priv->psr.link_standby)
-               dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
-       if (!dev_priv->psr.psr2_enabled && INTEL_GEN(dev_priv) >= 8)
-               dpcd_val |= DP_PSR_CRC_VERIFICATION;
        drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
 
        drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
@@ -468,9 +503,6 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
        idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
        val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
 
-       /* FIXME: selective update is probably totally broken because it doesn't
-        * mesh at all with our frontbuffer tracking. And the hw alone isn't
-        * good enough. */
        val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
        if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
                val |= EDP_Y_COORDINATE_ENABLE;
@@ -519,7 +551,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
        if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
                psr_max_h = 4096;
                psr_max_v = 2304;
-       } else if (IS_GEN9(dev_priv)) {
+       } else if (IS_GEN(dev_priv, 9)) {
                psr_max_h = 3640;
                psr_max_v = 2304;
        }
@@ -531,6 +563,18 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
                return false;
        }
 
+       /*
+        * HW sends SU blocks of size four scan lines, which means the starting
+        * X coordinate and Y granularity requirements will always be met. We
+        * only need to validate the SU block width is a multiple of
+        * x granularity.
+        */
+       if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
+               DRM_DEBUG_KMS("PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
+                             crtc_hdisplay, dev_priv->psr.su_x_granularity);
+               return false;
+       }
+
        return true;
 }
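
For reference, the width check above reduces to simple modular arithmetic. A minimal userspace sketch, where psr2_su_width_ok and its parameters are illustrative names rather than driver API:

	#include <stdbool.h>
	#include <stdio.h>

	/* PSR2 selective-update width check: the panel reports an X
	 * granularity for SU regions, and PSR2 is refused when the
	 * active width is not a whole multiple of it. */
	static bool psr2_su_width_ok(int hdisplay, int su_x_granularity)
	{
		if (su_x_granularity <= 0)
			return false; /* no usable granularity reported */
		return hdisplay % su_x_granularity == 0;
	}

	int main(void)
	{
		printf("%d\n", psr2_su_width_ok(3840, 4)); /* 1: multiple */
		printf("%d\n", psr2_su_width_ok(1366, 4)); /* 0: rejected */
		return 0;
	}
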
 
@@ -641,17 +685,14 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                hsw_psr_setup_aux(intel_dp);
 
-       if (dev_priv->psr.psr2_enabled) {
+       if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
+                                          !IS_GEMINILAKE(dev_priv))) {
                i915_reg_t reg = gen9_chicken_trans_reg(dev_priv,
                                                        cpu_transcoder);
                u32 chicken = I915_READ(reg);
 
-               if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
-                       chicken |= (PSR2_VSC_ENABLE_PROG_HEADER
-                                  | PSR2_ADD_VERTICAL_LINE_COUNT);
-
-               else
-                       chicken &= ~VSC_DATA_SEL_SOFTWARE_CONTROL;
+               chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
+                          PSR2_ADD_VERTICAL_LINE_COUNT;
                I915_WRITE(reg, chicken);
        }
 
index fbeaec3994e7fff3d4a72f94225dd85fa536c832..7f841dba87b3026893b2cb330168cb74246d5804 100644 (file)
 
 #include <linux/log2.h>
 
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 
 #include "i915_drv.h"
 #include "i915_gem_render_state.h"
+#include "i915_reset.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
 #include "intel_workarounds.h"
  */
 #define LEGACY_REQUEST_SIZE 200
 
-static unsigned int __intel_ring_space(unsigned int head,
-                                      unsigned int tail,
-                                      unsigned int size)
+static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
 {
-       /*
-        * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
-        * same cacheline, the Head Pointer must not be greater than the Tail
-        * Pointer."
-        */
-       GEM_BUG_ON(!is_power_of_2(size));
-       return (head - tail - CACHELINE_BYTES) & (size - 1);
+       return (i915_ggtt_offset(engine->status_page.vma) +
+               I915_GEM_HWS_INDEX_ADDR);
 }
 
 unsigned int intel_ring_update_space(struct intel_ring *ring)
@@ -133,7 +126,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
        cmd = MI_FLUSH;
        if (mode & EMIT_INVALIDATE) {
                cmd |= MI_EXE_FLUSH;
-               if (IS_G4X(rq->i915) || IS_GEN5(rq->i915))
+               if (IS_G4X(rq->i915) || IS_GEN(rq->i915, 5))
                        cmd |= MI_INVALIDATE_ISP;
        }
 
@@ -217,7 +210,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
  * really our business.  That leaves only stall at scoreboard.
  */
 static int
-intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
+gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
 {
        u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
        u32 *cs;
@@ -257,7 +250,7 @@ gen6_render_ring_flush(struct i915_request *rq, u32 mode)
        int ret;
 
        /* Force SNB workarounds for PIPE_CONTROL flushes */
-       ret = intel_emit_post_sync_nonzero_flush(rq);
+       ret = gen6_emit_post_sync_nonzero_flush(rq);
        if (ret)
                return ret;
 
@@ -300,6 +293,43 @@ gen6_render_ring_flush(struct i915_request *rq, u32 mode)
        return 0;
 }
 
+static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+       /* First we do the gen6_emit_post_sync_nonzero_flush w/a */
+       *cs++ = GFX_OP_PIPE_CONTROL(4);
+       *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+       *cs++ = 0;
+       *cs++ = 0;
+
+       *cs++ = GFX_OP_PIPE_CONTROL(4);
+       *cs++ = PIPE_CONTROL_QW_WRITE;
+       *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
+       *cs++ = 0;
+
+       /* Finally we can flush and, with it, emit the breadcrumb */
+       *cs++ = GFX_OP_PIPE_CONTROL(4);
+       *cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+                PIPE_CONTROL_DC_FLUSH_ENABLE |
+                PIPE_CONTROL_QW_WRITE |
+                PIPE_CONTROL_CS_STALL);
+       *cs++ = rq->timeline->hwsp_offset | PIPE_CONTROL_GLOBAL_GTT;
+       *cs++ = rq->fence.seqno;
+
+       *cs++ = GFX_OP_PIPE_CONTROL(4);
+       *cs++ = PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
+       *cs++ = intel_hws_seqno_address(rq->engine) | PIPE_CONTROL_GLOBAL_GTT;
+       *cs++ = rq->global_seqno;
+
+       *cs++ = MI_USER_INTERRUPT;
+       *cs++ = MI_NOOP;
+
+       rq->tail = intel_ring_offset(rq, cs);
+       assert_ring_tail_valid(rq->ring, rq->tail);
+
+       return cs;
+}
+
 static int
 gen7_render_ring_cs_stall_wa(struct i915_request *rq)
 {
@@ -379,11 +409,111 @@ gen7_render_ring_flush(struct i915_request *rq, u32 mode)
        return 0;
 }
 
-static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
+static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+       *cs++ = GFX_OP_PIPE_CONTROL(4);
+       *cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+                PIPE_CONTROL_DC_FLUSH_ENABLE |
+                PIPE_CONTROL_FLUSH_ENABLE |
+                PIPE_CONTROL_QW_WRITE |
+                PIPE_CONTROL_GLOBAL_GTT_IVB |
+                PIPE_CONTROL_CS_STALL);
+       *cs++ = rq->timeline->hwsp_offset;
+       *cs++ = rq->fence.seqno;
+
+       *cs++ = GFX_OP_PIPE_CONTROL(4);
+       *cs++ = (PIPE_CONTROL_QW_WRITE |
+                PIPE_CONTROL_GLOBAL_GTT_IVB |
+                PIPE_CONTROL_CS_STALL);
+       *cs++ = intel_hws_seqno_address(rq->engine);
+       *cs++ = rq->global_seqno;
+
+       *cs++ = MI_USER_INTERRUPT;
+       *cs++ = MI_NOOP;
+
+       rq->tail = intel_ring_offset(rq, cs);
+       assert_ring_tail_valid(rq->ring, rq->tail);
+
+       return cs;
+}
+
+static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+       GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
+       GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+
+       *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
+       *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
+       *cs++ = rq->fence.seqno;
+
+       *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
+       *cs++ = I915_GEM_HWS_INDEX_ADDR | MI_FLUSH_DW_USE_GTT;
+       *cs++ = rq->global_seqno;
+
+       *cs++ = MI_USER_INTERRUPT;
+       *cs++ = MI_NOOP;
+
+       rq->tail = intel_ring_offset(rq, cs);
+       assert_ring_tail_valid(rq->ring, rq->tail);
+
+       return cs;
+}
+
+#define GEN7_XCS_WA 32
+static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+       int i;
+
+       GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
+       GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+
+       *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
+       *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
+       *cs++ = rq->fence.seqno;
+
+       *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
+       *cs++ = I915_GEM_HWS_INDEX_ADDR | MI_FLUSH_DW_USE_GTT;
+       *cs++ = rq->global_seqno;
+
+       for (i = 0; i < GEN7_XCS_WA; i++) {
+               *cs++ = MI_STORE_DWORD_INDEX;
+               *cs++ = I915_GEM_HWS_SEQNO_ADDR;
+               *cs++ = rq->fence.seqno;
+       }
+
+       *cs++ = MI_FLUSH_DW;
+       *cs++ = 0;
+       *cs++ = 0;
+
+       *cs++ = MI_USER_INTERRUPT;
+
+       rq->tail = intel_ring_offset(rq, cs);
+       assert_ring_tail_valid(rq->ring, rq->tail);
+
+       return cs;
+}
+#undef GEN7_XCS_WA
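
The new fini-breadcrumb emitters all share one shape: advance a u32 cursor through the command buffer, optionally repeating the seqno store as a workaround, and hand the cursor back so the caller can derive the ring tail. A standalone sketch of that shape; the FAKE_* values are placeholders, not the real MI_* encodings:

	#include <stdint.h>
	#include <stdio.h>

	/* Placeholder encodings; the real MI_* opcodes and HWS offsets
	 * live in the i915 headers. */
	#define FAKE_MI_STORE_DWORD_INDEX	0x21000000u
	#define FAKE_MI_USER_INTERRUPT		0x02000000u
	#define FAKE_HWS_SEQNO_ADDR		0x100u
	#define XCS_WA_STORES			32

	/* Emitter shape: write dwords through a cursor, repeating the
	 * seqno store to defeat the gen7 coherency issue, and return
	 * the advanced cursor so the caller can compute the new tail. */
	static uint32_t *emit_breadcrumb(uint32_t *cs, uint32_t seqno)
	{
		int i;

		for (i = 0; i < XCS_WA_STORES; i++) {
			*cs++ = FAKE_MI_STORE_DWORD_INDEX;
			*cs++ = FAKE_HWS_SEQNO_ADDR;
			*cs++ = seqno;
		}
		*cs++ = FAKE_MI_USER_INTERRUPT;

		return cs;
	}

	int main(void)
	{
		uint32_t buf[XCS_WA_STORES * 3 + 1];
		uint32_t *end = emit_breadcrumb(buf, 42);

		printf("emitted %zu dwords\n", (size_t)(end - buf)); /* 97 */
		return 0;
	}
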
+
+static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
+{
+       /*
+        * Keep the render interrupt unmasked as this papers over
+        * lost interrupts following a reset.
+        */
+       if (engine->class == RENDER_CLASS) {
+               if (INTEL_GEN(engine->i915) >= 6)
+                       mask &= ~BIT(0);
+               else
+                       mask &= ~I915_USER_INTERRUPT;
+       }
+
+       intel_engine_set_hwsp_writemask(engine, mask);
+}
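
The HWSTAM policy above is pure mask arithmetic: everything masked except, on render engines, the user-interrupt bit. A standalone sketch, with FAKE_USER_INTERRUPT standing in for the real pre-gen6 interrupt bit:

	#include <stdint.h>
	#include <stdio.h>

	#define BIT(n) (1u << (n))

	/* Stand-in; the real value lives in i915_reg.h. */
	#define FAKE_USER_INTERRUPT BIT(1)

	/* Start from "mask everything" (~0u) and clear only the
	 * user-interrupt bit for render engines, so a lost interrupt
	 * after reset is still reported via the HWSP. */
	static uint32_t hwstam_mask(int is_render, int gen)
	{
		uint32_t mask = ~0u;

		if (is_render)
			mask &= ~(gen >= 6 ? BIT(0) : FAKE_USER_INTERRUPT);

		return mask;
	}

	int main(void)
	{
		printf("render gen9: %#x\n", hwstam_mask(1, 9)); /* 0xfffffffe */
		printf("video  gen9: %#x\n", hwstam_mask(0, 9)); /* 0xffffffff */
		return 0;
	}
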
+
+static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
 {
        struct drm_i915_private *dev_priv = engine->i915;
-       struct page *page = virt_to_page(engine->status_page.page_addr);
-       phys_addr_t phys = PFN_PHYS(page_to_pfn(page));
        u32 addr;
 
        addr = lower_32_bits(phys);
@@ -393,15 +523,30 @@ static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
        I915_WRITE(HWS_PGA, addr);
 }
 
-static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
+static struct page *status_page(struct intel_engine_cs *engine)
+{
+       struct drm_i915_gem_object *obj = engine->status_page.vma->obj;
+
+       GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+       return sg_page(obj->mm.pages->sgl);
+}
+
+static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
+{
+       set_hws_pga(engine, PFN_PHYS(page_to_pfn(status_page(engine))));
+       set_hwstam(engine, ~0u);
+}
+
+static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
 {
        struct drm_i915_private *dev_priv = engine->i915;
-       i915_reg_t mmio;
+       i915_reg_t hwsp;
 
-       /* The ring status page addresses are no longer next to the rest of
+       /*
+        * The ring status page addresses are no longer next to the rest of
         * the ring registers as of gen7.
         */
-       if (IS_GEN7(dev_priv)) {
+       if (IS_GEN(dev_priv, 7)) {
                switch (engine->id) {
                /*
                 * No more rings exist on Gen7. Default case is only to shut up
@@ -410,56 +555,55 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
                default:
                        GEM_BUG_ON(engine->id);
                case RCS:
-                       mmio = RENDER_HWS_PGA_GEN7;
+                       hwsp = RENDER_HWS_PGA_GEN7;
                        break;
                case BCS:
-                       mmio = BLT_HWS_PGA_GEN7;
+                       hwsp = BLT_HWS_PGA_GEN7;
                        break;
                case VCS:
-                       mmio = BSD_HWS_PGA_GEN7;
+                       hwsp = BSD_HWS_PGA_GEN7;
                        break;
                case VECS:
-                       mmio = VEBOX_HWS_PGA_GEN7;
+                       hwsp = VEBOX_HWS_PGA_GEN7;
                        break;
                }
-       } else if (IS_GEN6(dev_priv)) {
-               mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
+       } else if (IS_GEN(dev_priv, 6)) {
+               hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
        } else {
-               mmio = RING_HWS_PGA(engine->mmio_base);
+               hwsp = RING_HWS_PGA(engine->mmio_base);
        }
 
-       if (INTEL_GEN(dev_priv) >= 6) {
-               u32 mask = ~0u;
+       I915_WRITE(hwsp, offset);
+       POSTING_READ(hwsp);
+}
 
-               /*
-                * Keep the render interrupt unmasked as this papers over
-                * lost interrupts following a reset.
-                */
-               if (engine->id == RCS)
-                       mask &= ~BIT(0);
+static void flush_cs_tlb(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+       i915_reg_t instpm = RING_INSTPM(engine->mmio_base);
 
-               I915_WRITE(RING_HWSTAM(engine->mmio_base), mask);
-       }
+       if (!IS_GEN_RANGE(dev_priv, 6, 7))
+               return;
 
-       I915_WRITE(mmio, engine->status_page.ggtt_offset);
-       POSTING_READ(mmio);
+       /* Ring should be idle before issuing a sync flush */
+       WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
 
-       /* Flush the TLB for this page */
-       if (IS_GEN(dev_priv, 6, 7)) {
-               i915_reg_t reg = RING_INSTPM(engine->mmio_base);
+       I915_WRITE(instpm,
+                  _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+                                     INSTPM_SYNC_FLUSH));
+       if (intel_wait_for_register(dev_priv,
+                                   instpm, INSTPM_SYNC_FLUSH, 0,
+                                   1000))
+               DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+                         engine->name);
+}
 
-               /* ring should be idle before issuing a sync flush*/
-               WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
+static void ring_setup_status_page(struct intel_engine_cs *engine)
+{
+       set_hwsp(engine, i915_ggtt_offset(engine->status_page.vma));
+       set_hwstam(engine, ~0u);
 
-               I915_WRITE(reg,
-                          _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
-                                             INSTPM_SYNC_FLUSH));
-               if (intel_wait_for_register(dev_priv,
-                                           reg, INSTPM_SYNC_FLUSH, 0,
-                                           1000))
-                       DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
-                                 engine->name);
-       }
+       flush_cs_tlb(engine);
 }
 
 static bool stop_ring(struct intel_engine_cs *engine)
@@ -529,17 +673,10 @@ static int init_ring_common(struct intel_engine_cs *engine)
        if (HWS_NEEDS_PHYSICAL(dev_priv))
                ring_setup_phys_status_page(engine);
        else
-               intel_ring_setup_status_page(engine);
+               ring_setup_status_page(engine);
 
        intel_engine_reset_breadcrumbs(engine);
 
-       if (HAS_LEGACY_SEMAPHORES(engine->i915)) {
-               I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
-               I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
-               if (HAS_VEBOX(dev_priv))
-                       I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
-       }
-
        /* Enforce ordering by reading HEAD register back */
        I915_READ_HEAD(engine);
 
@@ -593,63 +730,87 @@ static int init_ring_common(struct intel_engine_cs *engine)
        }
 
        /* Papering over lost _interrupts_ immediately following the restart */
-       intel_engine_wakeup(engine);
+       intel_engine_queue_breadcrumbs(engine);
 out:
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
        return ret;
 }
 
-static struct i915_request *reset_prepare(struct intel_engine_cs *engine)
+static void reset_prepare(struct intel_engine_cs *engine)
 {
        intel_engine_stop_cs(engine);
-
-       if (engine->irq_seqno_barrier)
-               engine->irq_seqno_barrier(engine);
-
-       return i915_gem_find_active_request(engine);
 }
 
-static void skip_request(struct i915_request *rq)
+static void reset_ring(struct intel_engine_cs *engine, bool stalled)
 {
-       void *vaddr = rq->ring->vaddr;
+       struct i915_timeline *tl = &engine->timeline;
+       struct i915_request *pos, *rq;
+       unsigned long flags;
        u32 head;
 
-       head = rq->infix;
-       if (rq->postfix < head) {
-               memset32(vaddr + head, MI_NOOP,
-                        (rq->ring->size - head) / sizeof(u32));
-               head = 0;
+       rq = NULL;
+       spin_lock_irqsave(&tl->lock, flags);
+       list_for_each_entry(pos, &tl->requests, link) {
+               if (!i915_request_completed(pos)) {
+                       rq = pos;
+                       break;
+               }
        }
-       memset32(vaddr + head, MI_NOOP, (rq->postfix - head) / sizeof(u32));
-}
-
-static void reset_ring(struct intel_engine_cs *engine, struct i915_request *rq)
-{
-       GEM_TRACE("%s request global=%d, current=%d\n",
-                 engine->name, rq ? rq->global_seqno : 0,
-                 intel_engine_get_seqno(engine));
 
+       GEM_TRACE("%s seqno=%d, current=%d, stalled? %s\n",
+                 engine->name,
+                 rq ? rq->global_seqno : 0,
+                 intel_engine_get_seqno(engine),
+                 yesno(stalled));
        /*
-        * Try to restore the logical GPU state to match the continuation
-        * of the request queue. If we skip the context/PD restore, then
-        * the next request may try to execute assuming that its context
-        * is valid and loaded on the GPU and so may try to access invalid
-        * memory, prompting repeated GPU hangs.
+        * The guilty request will get skipped on a hung engine.
         *
-        * If the request was guilty, we still restore the logical state
-        * in case the next request requires it (e.g. the aliasing ppgtt),
-        * but skip over the hung batch.
+        * Users of client default contexts do not rely on logical
+        * state preserved between batches, so it is safe to execute
+        * queued requests following the hang. Non-default contexts
+        * rely on preserved state, so skipping a batch loses the
+        * evolution of the state and it needs to be considered corrupted.
+        * Executing more queued batches on top of corrupted state is
+        * risky. But we take the risk by trying to advance through
+        * the queued requests in order to make the client behaviour
+        * more predictable around resets, by not throwing away a random
+        * number of batches it has prepared for execution. Sophisticated
+        * clients can use gem_reset_stats_ioctl and dma fence status
+        * (exported via the sync_file info ioctl on explicit fences) to
+        * observe when they lose the context state and should rebuild
+        * accordingly.
         *
-        * If the request was innocent, we try to replay the request with
-        * the restored context.
+        * The context-ban and, ultimately, the client-ban mechanisms are safety
+        * valves if client submission ends up resulting in nothing more than
+        * subsequent hangs.
         */
+
        if (rq) {
-               /* If the rq hung, jump to its breadcrumb and skip the batch */
-               rq->ring->head = intel_ring_wrap(rq->ring, rq->head);
-               if (rq->fence.error == -EIO)
-                       skip_request(rq);
+               /*
+                * Try to restore the logical GPU state to match the
+                * continuation of the request queue. If we skip the
+                * context/PD restore, then the next request may try to execute
+                * assuming that its context is valid and loaded on the GPU and
+                * so may try to access invalid memory, prompting repeated GPU
+                * hangs.
+                *
+                * If the request was guilty, we still restore the logical
+                * state in case the next request requires it (e.g. the
+                * aliasing ppgtt), but skip over the hung batch.
+                *
+                * If the request was innocent, we try to replay the request
+                * with the restored context.
+                */
+               i915_reset_request(rq, stalled);
+
+               GEM_BUG_ON(rq->ring != engine->buffer);
+               head = rq->head;
+       } else {
+               head = engine->buffer->tail;
        }
+       engine->buffer->head = intel_ring_wrap(engine->buffer, head);
+
+       spin_unlock_irqrestore(&tl->lock, flags);
 }
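
The rewound head is derived from the first request the GPU had not completed. A toy version of that scan, where struct req and first_incomplete() are stand-ins for the timeline request list and i915_request_completed():

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	/* Toy request list standing in for the engine timeline. */
	struct req {
		int seqno;
		bool completed;
		struct req *next;
	};

	/* Walk requests in submission order and return the first one the
	 * GPU had not finished; everything before it is retired, and the
	 * ring head is rewound to everything from it onwards. */
	static struct req *first_incomplete(struct req *head)
	{
		for (; head; head = head->next)
			if (!head->completed)
				return head;
		return NULL; /* ring was idle: rewind head to tail */
	}

	int main(void)
	{
		struct req c = { 3, false, NULL };
		struct req b = { 2, false, &c };
		struct req a = { 1, true, &b };
		struct req *rq = first_incomplete(&a);

		printf("reset from seqno %d\n", rq ? rq->seqno : -1); /* 2 */
		return 0;
	}
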
 
 static void reset_finish(struct intel_engine_cs *engine)
@@ -679,7 +840,7 @@ static int init_render_ring(struct intel_engine_cs *engine)
                return ret;
 
        /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
-       if (IS_GEN(dev_priv, 4, 6))
+       if (IS_GEN_RANGE(dev_priv, 4, 6))
                I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
 
        /* We need to disable the AsyncFlip performance optimisations in order
@@ -688,22 +849,22 @@ static int init_render_ring(struct intel_engine_cs *engine)
         *
         * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
         */
-       if (IS_GEN(dev_priv, 6, 7))
+       if (IS_GEN_RANGE(dev_priv, 6, 7))
                I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
 
        /* Required for the hardware to program scanline values for waiting */
        /* WaEnableFlushTlbInvalidationMode:snb */
-       if (IS_GEN6(dev_priv))
+       if (IS_GEN(dev_priv, 6))
                I915_WRITE(GFX_MODE,
                           _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
 
        /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
-       if (IS_GEN7(dev_priv))
+       if (IS_GEN(dev_priv, 7))
                I915_WRITE(GFX_MODE_GEN7,
                           _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
                           _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
 
-       if (IS_GEN6(dev_priv)) {
+       if (IS_GEN(dev_priv, 6)) {
                /* From the Sandybridge PRM, volume 1 part 3, page 24:
                 * "If this bit is set, STCunit will have LRA as replacement
                 *  policy. [...] This bit must be reset.  LRA replacement
@@ -713,7 +874,7 @@ static int init_render_ring(struct intel_engine_cs *engine)
                           _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
        }
 
-       if (IS_GEN(dev_priv, 6, 7))
+       if (IS_GEN_RANGE(dev_priv, 6, 7))
                I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
        if (INTEL_GEN(dev_priv) >= 6)
@@ -722,33 +883,6 @@ static int init_render_ring(struct intel_engine_cs *engine)
        return 0;
 }
 
-static u32 *gen6_signal(struct i915_request *rq, u32 *cs)
-{
-       struct drm_i915_private *dev_priv = rq->i915;
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
-       int num_rings = 0;
-
-       for_each_engine(engine, dev_priv, id) {
-               i915_reg_t mbox_reg;
-
-               if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
-                       continue;
-
-               mbox_reg = rq->engine->semaphore.mbox.signal[engine->hw_id];
-               if (i915_mmio_reg_valid(mbox_reg)) {
-                       *cs++ = MI_LOAD_REGISTER_IMM(1);
-                       *cs++ = i915_mmio_reg_offset(mbox_reg);
-                       *cs++ = rq->global_seqno;
-                       num_rings++;
-               }
-       }
-       if (num_rings & 1)
-               *cs++ = MI_NOOP;
-
-       return cs;
-}
-
 static void cancel_requests(struct intel_engine_cs *engine)
 {
        struct i915_request *request;
@@ -760,11 +894,10 @@ static void cancel_requests(struct intel_engine_cs *engine)
        list_for_each_entry(request, &engine->timeline.requests, link) {
                GEM_BUG_ON(!request->global_seqno);
 
-               if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
-                            &request->fence.flags))
-                       continue;
+               if (!i915_request_signaled(request))
+                       dma_fence_set_error(&request->fence, -EIO);
 
-               dma_fence_set_error(&request->fence, -EIO);
+               i915_request_mark_complete(request);
        }
 
        intel_write_status_page(engine,
@@ -786,94 +919,59 @@ static void i9xx_submit_request(struct i915_request *request)
                        intel_ring_set_tail(request->ring, request->tail));
 }
 
-static void i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
 {
+       GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
+       GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+
+       *cs++ = MI_FLUSH;
+
+       *cs++ = MI_STORE_DWORD_INDEX;
+       *cs++ = I915_GEM_HWS_SEQNO_ADDR;
+       *cs++ = rq->fence.seqno;
+
        *cs++ = MI_STORE_DWORD_INDEX;
-       *cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
+       *cs++ = I915_GEM_HWS_INDEX_ADDR;
        *cs++ = rq->global_seqno;
+
        *cs++ = MI_USER_INTERRUPT;
 
        rq->tail = intel_ring_offset(rq, cs);
        assert_ring_tail_valid(rq->ring, rq->tail);
-}
 
-static const int i9xx_emit_breadcrumb_sz = 4;
-
-static void gen6_sema_emit_breadcrumb(struct i915_request *rq, u32 *cs)
-{
-       return i9xx_emit_breadcrumb(rq, rq->engine->semaphore.signal(rq, cs));
+       return cs;
 }
 
-static int
-gen6_ring_sync_to(struct i915_request *rq, struct i915_request *signal)
+#define GEN5_WA_STORES 8 /* must be at least 1! */
+static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
 {
-       u32 dw1 = MI_SEMAPHORE_MBOX |
-                 MI_SEMAPHORE_COMPARE |
-                 MI_SEMAPHORE_REGISTER;
-       u32 wait_mbox = signal->engine->semaphore.mbox.wait[rq->engine->hw_id];
-       u32 *cs;
-
-       WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
+       int i;
 
-       cs = intel_ring_begin(rq, 4);
-       if (IS_ERR(cs))
-               return PTR_ERR(cs);
+       GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
+       GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
 
-       *cs++ = dw1 | wait_mbox;
-       /* Throughout all of the GEM code, seqno passed implies our current
-        * seqno is >= the last seqno executed. However for hardware the
-        * comparison is strictly greater than.
-        */
-       *cs++ = signal->global_seqno - 1;
-       *cs++ = 0;
-       *cs++ = MI_NOOP;
-       intel_ring_advance(rq, cs);
+       *cs++ = MI_FLUSH;
 
-       return 0;
-}
+       *cs++ = MI_STORE_DWORD_INDEX;
+       *cs++ = I915_GEM_HWS_SEQNO_ADDR;
+       *cs++ = rq->fence.seqno;
+
+       BUILD_BUG_ON(GEN5_WA_STORES < 1);
+       for (i = 0; i < GEN5_WA_STORES; i++) {
+               *cs++ = MI_STORE_DWORD_INDEX;
+               *cs++ = I915_GEM_HWS_INDEX_ADDR;
+               *cs++ = rq->global_seqno;
+       }
 
-static void
-gen5_seqno_barrier(struct intel_engine_cs *engine)
-{
-       /* MI_STORE are internally buffered by the GPU and not flushed
-        * either by MI_FLUSH or SyncFlush or any other combination of
-        * MI commands.
-        *
-        * "Only the submission of the store operation is guaranteed.
-        * The write result will be complete (coherent) some time later
-        * (this is practically a finite period but there is no guaranteed
-        * latency)."
-        *
-        * Empirically, we observe that we need a delay of at least 75us to
-        * be sure that the seqno write is visible by the CPU.
-        */
-       usleep_range(125, 250);
-}
+       *cs++ = MI_USER_INTERRUPT;
+       *cs++ = MI_NOOP;
 
-static void
-gen6_seqno_barrier(struct intel_engine_cs *engine)
-{
-       struct drm_i915_private *dev_priv = engine->i915;
+       rq->tail = intel_ring_offset(rq, cs);
+       assert_ring_tail_valid(rq->ring, rq->tail);
 
-       /* Workaround to force correct ordering between irq and seqno writes on
-        * ivb (and maybe also on snb) by reading from a CS register (like
-        * ACTHD) before reading the status page.
-        *
-        * Note that this effectively stalls the read by the time it takes to
-        * do a memory transaction, which more or less ensures that the write
-        * from the GPU has sufficient time to invalidate the CPU cacheline.
-        * Alternatively we could delay the interrupt from the CS ring to give
-        * the write time to land, but that would incur a delay after every
-        * batch i.e. much more frequent than a delay when waiting for the
-        * interrupt (with the same net latency).
-        *
-        * Also note that to prevent whole machine hangs on gen7, we have to
-        * take the spinlock to guard against concurrent cacheline access.
-        */
-       spin_lock_irq(&dev_priv->uncore.lock);
-       POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
-       spin_unlock_irq(&dev_priv->uncore.lock);
+       return cs;
 }
+#undef GEN5_WA_STORES
 
 static void
 gen5_irq_enable(struct intel_engine_cs *engine)
@@ -948,6 +1046,10 @@ gen6_irq_enable(struct intel_engine_cs *engine)
        I915_WRITE_IMR(engine,
                       ~(engine->irq_enable_mask |
                         engine->irq_keep_mask));
+
+       /* Flush/delay to ensure the RING_IMR is active before the GT IMR */
+       POSTING_READ_FW(RING_IMR(engine->mmio_base));
+
        gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
 }
 
@@ -966,6 +1068,10 @@ hsw_vebox_irq_enable(struct intel_engine_cs *engine)
        struct drm_i915_private *dev_priv = engine->i915;
 
        I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+
+       /* Flush/delay to ensure the RING_IMR is active before the GT IMR */
+       POSTING_READ_FW(RING_IMR(engine->mmio_base));
+
        gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
 }
 
@@ -1091,6 +1197,10 @@ int intel_ring_pin(struct intel_ring *ring)
 
        GEM_BUG_ON(ring->vaddr);
 
+       ret = i915_timeline_pin(ring->timeline);
+       if (ret)
+               return ret;
+
        flags = PIN_GLOBAL;
 
        /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
@@ -1107,28 +1217,32 @@ int intel_ring_pin(struct intel_ring *ring)
                else
                        ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
                if (unlikely(ret))
-                       return ret;
+                       goto unpin_timeline;
        }
 
        ret = i915_vma_pin(vma, 0, 0, flags);
        if (unlikely(ret))
-               return ret;
+               goto unpin_timeline;
 
        if (i915_vma_is_map_and_fenceable(vma))
                addr = (void __force *)i915_vma_pin_iomap(vma);
        else
                addr = i915_gem_object_pin_map(vma->obj, map);
-       if (IS_ERR(addr))
-               goto err;
+       if (IS_ERR(addr)) {
+               ret = PTR_ERR(addr);
+               goto unpin_ring;
+       }
 
        vma->obj->pin_global++;
 
        ring->vaddr = addr;
        return 0;
 
-err:
+unpin_ring:
        i915_vma_unpin(vma);
-       return PTR_ERR(addr);
+unpin_timeline:
+       i915_timeline_unpin(ring->timeline);
+       return ret;
 }
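
The hunk above converts intel_ring_pin() to the usual kernel goto-unwind shape. A self-contained sketch of the pattern, with stub pin/release functions replacing the i915 calls:

	#include <stdio.h>

	/* Stubs standing in for i915_timeline_pin(), i915_vma_pin() and
	 * the ring mapping step; each returns 0 or a negative errno. */
	static int pin_timeline(void) { return 0; }
	static int pin_vma(void)      { return 0; }
	static int map_ring(void)     { return -12; /* simulate -ENOMEM */ }
	static void release_vma(void)      { puts("unpin vma"); }
	static void release_timeline(void) { puts("unpin timeline"); }

	/* Acquire resources in order; on failure, jump to a label that
	 * releases only what was already acquired, in reverse order. */
	static int ring_pin(void)
	{
		int ret;

		ret = pin_timeline();
		if (ret)
			return ret;

		ret = pin_vma();
		if (ret)
			goto unpin_timeline;

		ret = map_ring();
		if (ret)
			goto unpin_ring;

		return 0; /* success: all three held */

	unpin_ring:
		release_vma();
	unpin_timeline:
		release_timeline();
		return ret;
	}

	int main(void)
	{
		printf("ring_pin() = %d\n", ring_pin()); /* -12 after full unwind */
		return 0;
	}
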
 
 void intel_ring_reset(struct intel_ring *ring, u32 tail)
@@ -1157,6 +1271,8 @@ void intel_ring_unpin(struct intel_ring *ring)
 
        ring->vma->obj->pin_global--;
        i915_vma_unpin(ring->vma);
+
+       i915_timeline_unpin(ring->timeline);
 }
 
 static struct i915_vma *
@@ -1467,13 +1583,18 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
        struct intel_ring *ring;
        int err;
 
-       intel_engine_setup_common(engine);
+       err = intel_engine_setup_common(engine);
+       if (err)
+               return err;
 
-       timeline = i915_timeline_create(engine->i915, engine->name);
+       timeline = i915_timeline_create(engine->i915,
+                                       engine->name,
+                                       engine->status_page.vma);
        if (IS_ERR(timeline)) {
                err = PTR_ERR(timeline);
                goto err;
        }
+       GEM_BUG_ON(timeline->has_initial_breadcrumb);
 
        ring = intel_engine_create_ring(engine, timeline, 32 * PAGE_SIZE);
        i915_timeline_put(timeline);
@@ -1493,6 +1614,8 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
        if (err)
                goto err_unpin;
 
+       GEM_BUG_ON(ring->timeline->hwsp_ggtt != engine->status_page.vma);
+
        return 0;
 
 err_unpin:
@@ -1581,10 +1704,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
        struct intel_engine_cs *engine = rq->engine;
        enum intel_engine_id id;
        const int num_rings =
-               /* Use an extended w/a on gen7 if signalling from other rings */
-               (HAS_LEGACY_SEMAPHORES(i915) && IS_GEN7(i915)) ?
-               INTEL_INFO(i915)->num_rings - 1 :
-               0;
+               IS_HSW_GT1(i915) ? RUNTIME_INFO(i915)->num_rings - 1 : 0;
        bool force_restore = false;
        int len;
        u32 *cs;
@@ -1597,7 +1717,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
                flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;
 
        len = 4;
-       if (IS_GEN7(i915))
+       if (IS_GEN(i915, 7))
                len += 2 + (num_rings ? 4*num_rings + 6 : 0);
        if (flags & MI_FORCE_RESTORE) {
                GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
@@ -1611,7 +1731,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
                return PTR_ERR(cs);
 
        /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
-       if (IS_GEN7(i915)) {
+       if (IS_GEN(i915, 7)) {
                *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
                if (num_rings) {
                        struct intel_engine_cs *signaller;
@@ -1658,7 +1778,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
         */
        *cs++ = MI_NOOP;
 
-       if (IS_GEN7(i915)) {
+       if (IS_GEN(i915, 7)) {
                if (num_rings) {
                        struct intel_engine_cs *signaller;
                        i915_reg_t last_reg = {}; /* keep gcc quiet */
@@ -1828,18 +1948,21 @@ static int ring_request_alloc(struct i915_request *request)
        int ret;
 
        GEM_BUG_ON(!request->hw_context->pin_count);
+       GEM_BUG_ON(request->timeline->has_initial_breadcrumb);
 
-       /* Flush enough space to reduce the likelihood of waiting after
+       /*
+        * Flush enough space to reduce the likelihood of waiting after
         * we start building the request - in which case we will just
         * have to repeat work.
         */
        request->reserved_space += LEGACY_REQUEST_SIZE;
 
-       ret = intel_ring_wait_for_space(request->ring, request->reserved_space);
+       ret = switch_context(request);
        if (ret)
                return ret;
 
-       ret = switch_context(request);
+       /* Unconditionally invalidate GPU caches and TLBs. */
+       ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
        if (ret)
                return ret;
 
@@ -1881,22 +2004,6 @@ static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
        return 0;
 }
 
-int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes)
-{
-       GEM_BUG_ON(bytes > ring->effective_size);
-       if (unlikely(bytes > ring->effective_size - ring->emit))
-               bytes += ring->size - ring->emit;
-
-       if (unlikely(bytes > ring->space)) {
-               int ret = wait_for_space(ring, bytes);
-               if (unlikely(ret))
-                       return ret;
-       }
-
-       GEM_BUG_ON(ring->space < bytes);
-       return 0;
-}
-
 u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
 {
        struct intel_ring *ring = rq->ring;
@@ -2129,77 +2236,15 @@ static int gen6_ring_flush(struct i915_request *rq, u32 mode)
        return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
 }
 
-static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
-                                      struct intel_engine_cs *engine)
-{
-       int i;
-
-       if (!HAS_LEGACY_SEMAPHORES(dev_priv))
-               return;
-
-       GEM_BUG_ON(INTEL_GEN(dev_priv) < 6);
-       engine->semaphore.sync_to = gen6_ring_sync_to;
-       engine->semaphore.signal = gen6_signal;
-
-       /*
-        * The current semaphore is only applied on pre-gen8
-        * platform.  And there is no VCS2 ring on the pre-gen8
-        * platform. So the semaphore between RCS and VCS2 is
-        * initialized as INVALID.
-        */
-       for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) {
-               static const struct {
-                       u32 wait_mbox;
-                       i915_reg_t mbox_reg;
-               } sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = {
-                       [RCS_HW] = {
-                               [VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RV,  .mbox_reg = GEN6_VRSYNC },
-                               [BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RB,  .mbox_reg = GEN6_BRSYNC },
-                               [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
-                       },
-                       [VCS_HW] = {
-                               [RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VR,  .mbox_reg = GEN6_RVSYNC },
-                               [BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VB,  .mbox_reg = GEN6_BVSYNC },
-                               [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
-                       },
-                       [BCS_HW] = {
-                               [RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BR,  .mbox_reg = GEN6_RBSYNC },
-                               [VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BV,  .mbox_reg = GEN6_VBSYNC },
-                               [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
-                       },
-                       [VECS_HW] = {
-                               [RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
-                               [VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
-                               [BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
-                       },
-               };
-               u32 wait_mbox;
-               i915_reg_t mbox_reg;
-
-               if (i == engine->hw_id) {
-                       wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
-                       mbox_reg = GEN6_NOSYNC;
-               } else {
-                       wait_mbox = sem_data[engine->hw_id][i].wait_mbox;
-                       mbox_reg = sem_data[engine->hw_id][i].mbox_reg;
-               }
-
-               engine->semaphore.mbox.wait[i] = wait_mbox;
-               engine->semaphore.mbox.signal[i] = mbox_reg;
-       }
-}
-
 static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
                                struct intel_engine_cs *engine)
 {
        if (INTEL_GEN(dev_priv) >= 6) {
                engine->irq_enable = gen6_irq_enable;
                engine->irq_disable = gen6_irq_disable;
-               engine->irq_seqno_barrier = gen6_seqno_barrier;
        } else if (INTEL_GEN(dev_priv) >= 5) {
                engine->irq_enable = gen5_irq_enable;
                engine->irq_disable = gen5_irq_disable;
-               engine->irq_seqno_barrier = gen5_seqno_barrier;
        } else if (INTEL_GEN(dev_priv) >= 3) {
                engine->irq_enable = i9xx_irq_enable;
                engine->irq_disable = i9xx_irq_disable;
@@ -2231,7 +2276,6 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
        GEM_BUG_ON(INTEL_GEN(dev_priv) >= 8);
 
        intel_ring_init_irq(dev_priv, engine);
-       intel_ring_init_semaphores(dev_priv, engine);
 
        engine->init_hw = init_ring_common;
        engine->reset.prepare = reset_prepare;
@@ -2241,18 +2285,14 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
        engine->context_pin = intel_ring_context_pin;
        engine->request_alloc = ring_request_alloc;
 
-       engine->emit_breadcrumb = i9xx_emit_breadcrumb;
-       engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
-       if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
-               int num_rings;
-
-               engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;
-
-               num_rings = INTEL_INFO(dev_priv)->num_rings - 1;
-               engine->emit_breadcrumb_sz += num_rings * 3;
-               if (num_rings & 1)
-                       engine->emit_breadcrumb_sz++;
-       }
+       /*
+        * Using a global execution timeline; the previous final breadcrumb is
+        * equivalent to our next initial breadcrumb, so we can elide
+        * engine->emit_init_breadcrumb().
+        */
+       engine->emit_fini_breadcrumb = i9xx_emit_breadcrumb;
+       if (IS_GEN(dev_priv, 5))
+               engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;
 
        engine->set_default_submission = i9xx_set_default_submission;
 
@@ -2278,12 +2318,15 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
 
        engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
 
-       if (INTEL_GEN(dev_priv) >= 6) {
+       if (INTEL_GEN(dev_priv) >= 7) {
                engine->init_context = intel_rcs_ctx_init;
                engine->emit_flush = gen7_render_ring_flush;
-               if (IS_GEN6(dev_priv))
-                       engine->emit_flush = gen6_render_ring_flush;
-       } else if (IS_GEN5(dev_priv)) {
+               engine->emit_fini_breadcrumb = gen7_rcs_emit_breadcrumb;
+       } else if (IS_GEN(dev_priv, 6)) {
+               engine->init_context = intel_rcs_ctx_init;
+               engine->emit_flush = gen6_render_ring_flush;
+               engine->emit_fini_breadcrumb = gen6_rcs_emit_breadcrumb;
+       } else if (IS_GEN(dev_priv, 5)) {
                engine->emit_flush = gen4_render_ring_flush;
        } else {
                if (INTEL_GEN(dev_priv) < 4)
@@ -2313,13 +2356,18 @@ int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
 
        if (INTEL_GEN(dev_priv) >= 6) {
                /* gen6 bsd needs a special wa for tail updates */
-               if (IS_GEN6(dev_priv))
+               if (IS_GEN(dev_priv, 6))
                        engine->set_default_submission = gen6_bsd_set_default_submission;
                engine->emit_flush = gen6_bsd_ring_flush;
                engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+
+               if (IS_GEN(dev_priv, 6))
+                       engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb;
+               else
+                       engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
        } else {
                engine->emit_flush = bsd_ring_flush;
-               if (IS_GEN5(dev_priv))
+               if (IS_GEN(dev_priv, 5))
                        engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
                else
                        engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
@@ -2332,11 +2380,18 @@ int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
 
+       GEM_BUG_ON(INTEL_GEN(dev_priv) < 6);
+
        intel_ring_default_vfuncs(dev_priv, engine);
 
        engine->emit_flush = gen6_ring_flush;
        engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
 
+       if (IS_GEN(dev_priv, 6))
+               engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb;
+       else
+               engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
+
        return intel_init_ring_buffer(engine);
 }
 
@@ -2344,6 +2399,8 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
 
+       GEM_BUG_ON(INTEL_GEN(dev_priv) < 7);
+
        intel_ring_default_vfuncs(dev_priv, engine);
 
        engine->emit_flush = gen6_ring_flush;
@@ -2351,5 +2408,7 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
        engine->irq_enable = hsw_vebox_irq_enable;
        engine->irq_disable = hsw_vebox_irq_disable;
 
+       engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
+
        return intel_init_ring_buffer(engine);
 }
index 72edaa7ff4114fc61894f298f8d3952e8c5855c9..710ffb2217753b85026c92996f092890fa434ebf 100644 (file)
@@ -5,6 +5,7 @@
 #include <drm/drm_util.h>
 
 #include <linux/hashtable.h>
+#include <linux/irq_work.h>
 #include <linux/seqlock.h>
 
 #include "i915_gem_batch_pool.h"
@@ -28,12 +29,11 @@ struct i915_sched_attr;
  * workarounds!
  */
 #define CACHELINE_BYTES 64
-#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))
+#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))
 
 struct intel_hw_status_page {
        struct i915_vma *vma;
-       u32 *page_addr;
-       u32 ggtt_offset;
+       u32 *addr;
 };
 
 #define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
@@ -94,12 +94,12 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
 #define I915_MAX_SUBSLICES 8
 
 #define instdone_slice_mask(dev_priv__) \
-       (IS_GEN7(dev_priv__) ? \
-        1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)
+       (IS_GEN(dev_priv__, 7) ? \
+        1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)
 
 #define instdone_subslice_mask(dev_priv__) \
-       (IS_GEN7(dev_priv__) ? \
-        1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask[0])
+       (IS_GEN(dev_priv__, 7) ? \
+        1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0])
 
 #define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
        for ((slice__) = 0, (subslice__) = 0; \
@@ -120,13 +120,8 @@ struct intel_instdone {
 struct intel_engine_hangcheck {
        u64 acthd;
        u32 seqno;
-       enum intel_engine_hangcheck_action action;
        unsigned long action_timestamp;
-       int deadlock;
        struct intel_instdone instdone;
-       struct i915_request *active_request;
-       bool stalled:1;
-       bool wedged:1;
 };
 
 struct intel_ring {
@@ -209,6 +204,7 @@ struct i915_priolist {
 
 struct st_preempt_hang {
        struct completion completion;
+       unsigned int count;
        bool inject_hang;
 };
 
@@ -299,14 +295,18 @@ struct intel_engine_execlists {
        unsigned int port_mask;
 
        /**
-        * @queue_priority: Highest pending priority.
+        * @queue_priority_hint: Highest pending priority.
         *
         * When we add requests into the queue, or adjust the priority of
         * executing requests, we compute the maximum priority of those
         * pending requests. We can then use this value to determine if
         * we need to preempt the executing requests to service the queue.
+        * However, since we may have recorded the priority of an in-flight
+        * request that we wanted to preempt but that has since completed, by
+        * the time of dequeuing the priority hint may no longer match the
+        * highest available request priority.
         */
-       int queue_priority;
+       int queue_priority_hint;
 
        /**
         * @queue: queue of requests, in priority lists
@@ -365,9 +365,6 @@ struct intel_engine_cs {
        struct drm_i915_gem_object *default_state;
        void *pinned_default_state;
 
-       unsigned long irq_posted;
-#define ENGINE_IRQ_BREADCRUMB 0
-
        /* Rather than have every client wait upon all user interrupts,
         * with the herd waking after every interrupt and each doing the
         * heavyweight seqno dance, we delegate the task (of being the
@@ -385,23 +382,14 @@ struct intel_engine_cs {
         * the overhead of waking that client is much preferred.
         */
        struct intel_breadcrumbs {
-               spinlock_t irq_lock; /* protects irq_*; irqsafe */
-               struct intel_wait *irq_wait; /* oldest waiter by retirement */
-
-               spinlock_t rb_lock; /* protects the rb and wraps irq_lock */
-               struct rb_root waiters; /* sorted by retirement, priority */
-               struct list_head signals; /* sorted by retirement */
-               struct task_struct *signaler; /* used for fence signalling */
+               spinlock_t irq_lock;
+               struct list_head signalers;
 
-               struct timer_list fake_irq; /* used after a missed interrupt */
-               struct timer_list hangcheck; /* detect missed interrupts */
+               struct irq_work irq_work; /* for use from inside irq_lock */
 
-               unsigned int hangcheck_interrupts;
                unsigned int irq_enabled;
-               unsigned int irq_count;
 
-               bool irq_armed : 1;
-               I915_SELFTEST_DECLARE(bool mock : 1);
+               bool irq_armed;
        } breadcrumbs;
 
        struct {
@@ -415,16 +403,17 @@ struct intel_engine_cs {
                /**
                 * @enable_count: Reference count for the enabled samplers.
                 *
-                * Index number corresponds to the bit number from @enable.
+                * Index number corresponds to @enum drm_i915_pmu_engine_sample.
                 */
-               unsigned int enable_count[I915_PMU_SAMPLE_BITS];
+               unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT];
                /**
                 * @sample: Counter values for sampling events.
                 *
                 * Our internal timer stores the current counters in this field.
+                *
+                * Index number corresponds to @enum drm_i915_pmu_engine_sample.
                 */
-#define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1)
-               struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX];
+               struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
        } pmu;
 
        /*
@@ -448,9 +437,8 @@ struct intel_engine_cs {
        int             (*init_hw)(struct intel_engine_cs *engine);
 
        struct {
-               struct i915_request *(*prepare)(struct intel_engine_cs *engine);
-               void (*reset)(struct intel_engine_cs *engine,
-                             struct i915_request *rq);
+               void (*prepare)(struct intel_engine_cs *engine);
+               void (*reset)(struct intel_engine_cs *engine, bool stalled);
                void (*finish)(struct intel_engine_cs *engine);
        } reset;
 
@@ -474,8 +462,10 @@ struct intel_engine_cs {
                                         unsigned int dispatch_flags);
 #define I915_DISPATCH_SECURE BIT(0)
 #define I915_DISPATCH_PINNED BIT(1)
-       void            (*emit_breadcrumb)(struct i915_request *rq, u32 *cs);
-       int             emit_breadcrumb_sz;
+       int              (*emit_init_breadcrumb)(struct i915_request *rq);
+       u32             *(*emit_fini_breadcrumb)(struct i915_request *rq,
+                                                u32 *cs);
+       unsigned int    emit_fini_breadcrumb_dw;
 
        /* Pass the request to the hardware queue (e.g. directly into
         * the legacy ringbuffer or to the end of an execlist).
@@ -501,69 +491,8 @@ struct intel_engine_cs {
         */
        void            (*cancel_requests)(struct intel_engine_cs *engine);
 
-       /* Some chipsets are not quite as coherent as advertised and need
-        * an expensive kick to force a true read of the up-to-date seqno.
-        * However, the up-to-date seqno is not always required and the last
-        * seen value is good enough. Note that the seqno will always be
-        * monotonic, even if not coherent.
-        */
-       void            (*irq_seqno_barrier)(struct intel_engine_cs *engine);
        void            (*cleanup)(struct intel_engine_cs *engine);
 
-       /* GEN8 signal/wait table - never trust comments!
-        *        signal to     signal to    signal to   signal to      signal to
-        *          RCS            VCS          BCS        VECS          VCS2
-        *      --------------------------------------------------------------------
-        *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
-        *      |-------------------------------------------------------------------
-        *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
-        *      |-------------------------------------------------------------------
-        *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
-        *      |-------------------------------------------------------------------
-        * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
-        *      |-------------------------------------------------------------------
-        * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP  (0xc0) |
-        *      |-------------------------------------------------------------------
-        *
-        * Generalization:
-        *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
-        *  ie. transpose of g(x, y)
-        *
-        *       sync from      sync from    sync from    sync from     sync from
-        *          RCS            VCS          BCS        VECS          VCS2
-        *      --------------------------------------------------------------------
-        *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
-        *      |-------------------------------------------------------------------
-        *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
-        *      |-------------------------------------------------------------------
-        *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
-        *      |-------------------------------------------------------------------
-        * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
-        *      |-------------------------------------------------------------------
-        * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
-        *      |-------------------------------------------------------------------
-        *
-        * Generalization:
-        *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
-        *  ie. transpose of f(x, y)
-        */
-       struct {
-#define GEN6_SEMAPHORE_LAST    VECS_HW
-#define GEN6_NUM_SEMAPHORES    (GEN6_SEMAPHORE_LAST + 1)
-#define GEN6_SEMAPHORES_MASK   GENMASK(GEN6_SEMAPHORE_LAST, 0)
-               struct {
-                       /* our mbox written by others */
-                       u32             wait[GEN6_NUM_SEMAPHORES];
-                       /* mboxes this ring signals to */
-                       i915_reg_t      signal[GEN6_NUM_SEMAPHORES];
-               } mbox;
-
-               /* AKA wait() */
-               int     (*sync_to)(struct i915_request *rq,
-                                  struct i915_request *signal);
-               u32     *(*signal)(struct i915_request *rq, u32 *cs);
-       } semaphore;
-
        struct intel_engine_execlists execlists;
 
        /* Contexts are pinned whilst they are active on the GPU. The last
@@ -664,7 +593,20 @@ intel_engine_has_preemption(const struct intel_engine_cs *engine)
 
 static inline bool __execlists_need_preempt(int prio, int last)
 {
-       return prio > max(0, last);
+       /*
+        * Allow preemption of low -> normal -> high, but we do
+        * not allow low priority tasks to preempt other low priority
+        * tasks under the impression that latency for low priority
+        * tasks does not matter (as much as background throughput),
+        * so keep it simple (KISS).
+        *
+        * More naturally we would write
+        *      prio >= max(0, last);
+        * except that we wish to prevent triggering preemption at the same
+        * priority level: the task that is running should remain running
+        * to preserve FIFO ordering of dependencies.
+        */
+       return prio > max(I915_PRIORITY_NORMAL - 1, last);
 }
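
The predicate now compares against I915_PRIORITY_NORMAL - 1 as well as the running priority. A standalone sketch of the resulting behaviour, with PRIORITY_NORMAL standing in for I915_PRIORITY_NORMAL:

	#include <stdbool.h>
	#include <stdio.h>

	#define PRIORITY_NORMAL 0 /* stand-in for I915_PRIORITY_NORMAL */

	static int max_int(int a, int b) { return a > b ? a : b; }

	/* A queued priority only triggers preemption when it is strictly
	 * greater than both the running priority and NORMAL - 1, so
	 * equal-priority work keeps FIFO order and low-priority work
	 * never preempts. */
	static bool need_preempt(int queued, int running)
	{
		return queued > max_int(PRIORITY_NORMAL - 1, running);
	}

	int main(void)
	{
		printf("%d\n", need_preempt(1, 0));   /* 1: higher prio preempts */
		printf("%d\n", need_preempt(0, 0));   /* 0: equal prio, keep FIFO */
		printf("%d\n", need_preempt(-1, -2)); /* 0: low prio never preempts */
		return 0;
	}
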
 
 static inline void
@@ -742,7 +684,7 @@ static inline u32
 intel_read_status_page(const struct intel_engine_cs *engine, int reg)
 {
        /* Ensure that the compiler doesn't optimize away the load. */
-       return READ_ONCE(engine->status_page.page_addr[reg]);
+       return READ_ONCE(engine->status_page.addr[reg]);
 }
 
 static inline void
@@ -755,12 +697,12 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
         */
        if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
                mb();
-               clflush(&engine->status_page.page_addr[reg]);
-               engine->status_page.page_addr[reg] = value;
-               clflush(&engine->status_page.page_addr[reg]);
+               clflush(&engine->status_page.addr[reg]);
+               engine->status_page.addr[reg] = value;
+               clflush(&engine->status_page.addr[reg]);
                mb();
        } else {
-               WRITE_ONCE(engine->status_page.page_addr[reg], value);
+               WRITE_ONCE(engine->status_page.addr[reg], value);
        }
 }
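
On x86 the CLFLUSH branch can be sketched in userspace with SSE2 intrinsics (compile with -msse2). This mirrors the fence/flush/write/flush/fence ordering above, but only as an illustration, not the kernel's clflush() implementation:

	#include <emmintrin.h>
	#include <stdint.h>

	/* Fence, flush the line, store, flush again, fence, so a device
	 * snooping the page observes the new value; otherwise assume a
	 * plain volatile store (the WRITE_ONCE() path) is sufficient. */
	static void write_status_dword(volatile uint32_t *slot, uint32_t value,
				       int have_clflush)
	{
		if (have_clflush) {
			_mm_mfence();
			_mm_clflush((const void *)slot);
			*slot = value;
			_mm_clflush((const void *)slot);
			_mm_mfence();
		} else {
			*slot = value;
		}
	}

	int main(void)
	{
		static uint32_t page[16];

		write_status_dword(&page[0], 0xdeadbeef, 1);
		return page[0] == 0xdeadbeef ? 0 : 1;
	}
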
 
@@ -781,11 +723,13 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
  * The area from dword 0x30 to 0x3ff is available for driver usage.
  */
 #define I915_GEM_HWS_INDEX             0x30
-#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
-#define I915_GEM_HWS_PREEMPT_INDEX     0x32
-#define I915_GEM_HWS_PREEMPT_ADDR (I915_GEM_HWS_PREEMPT_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
-#define I915_GEM_HWS_SCRATCH_INDEX     0x40
-#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
+#define I915_GEM_HWS_INDEX_ADDR                (I915_GEM_HWS_INDEX * sizeof(u32))
+#define I915_GEM_HWS_PREEMPT           0x32
+#define I915_GEM_HWS_PREEMPT_ADDR      (I915_GEM_HWS_PREEMPT * sizeof(u32))
+#define I915_GEM_HWS_SEQNO             0x40
+#define I915_GEM_HWS_SEQNO_ADDR                (I915_GEM_HWS_SEQNO * sizeof(u32))
+#define I915_GEM_HWS_SCRATCH           0x80
+#define I915_GEM_HWS_SCRATCH_ADDR      (I915_GEM_HWS_SCRATCH * sizeof(u32))
 
 #define I915_HWS_CSB_BUF0_INDEX                0x10
 #define I915_HWS_CSB_WRITE_INDEX       0x1f
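
Both the old and the new macros turn a dword index in the HWSP into a byte
address; the shift by MI_STORE_DWORD_INDEX_SHIFT (assumed to be 2 here) and
the multiply by sizeof(u32) agree, which a quick standalone check confirms:

#include <assert.h>
#include <stdint.h>

#define MI_STORE_DWORD_INDEX_SHIFT 2 /* assumed: one dword is 4 bytes */

int main(void)
{
	uint32_t index = 0x30; /* I915_GEM_HWS_INDEX */
	assert((index << MI_STORE_DWORD_INDEX_SHIFT) ==
	       index * sizeof(uint32_t)); /* 0xc0 either way */
	return 0;
}
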
@@ -808,7 +752,6 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
 
 int __must_check intel_ring_cacheline_align(struct i915_request *rq);
 
-int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes);
 u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n);
 
 static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
@@ -889,9 +832,21 @@ intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
        return tail;
 }
 
-void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
+static inline unsigned int
+__intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)
+{
+       /*
+        * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
+        * same cacheline, the Head Pointer must not be greater than the Tail
+        * Pointer."
+        */
+       GEM_BUG_ON(!is_power_of_2(size));
+       return (head - tail - CACHELINE_BYTES) & (size - 1);
+}
+
+void intel_engine_write_global_seqno(struct intel_engine_cs *engine, u32 seqno);
 
-void intel_engine_setup_common(struct intel_engine_cs *engine);
+int intel_engine_setup_common(struct intel_engine_cs *engine);
 int intel_engine_init_common(struct intel_engine_cs *engine);
 void intel_engine_cleanup_common(struct intel_engine_cs *engine);
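
To see why __intel_ring_space() reserves CACHELINE_BYTES of slack: with head
equal to tail, a full power-of-two ring would be indistinguishable from an
empty one, so one cacheline is sacrificed. A standalone sketch, assuming a
4096-byte ring and 64-byte cachelines:

#include <stdio.h>

#define CACHELINE_BYTES 64 /* assumed typical value */

/* size must be a power of two, as the GEM_BUG_ON above asserts */
static unsigned int ring_space(unsigned int head, unsigned int tail,
			       unsigned int size)
{
	return (head - tail - CACHELINE_BYTES) & (size - 1);
}

int main(void)
{
	printf("%u\n", ring_space(0, 0, 4096));    /* 4032: never reads "full" */
	printf("%u\n", ring_space(128, 64, 4096)); /* 0: head one line ahead */
	printf("%u\n", ring_space(64, 128, 4096)); /* 3968: wraps correctly */
	return 0;
}
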
 
@@ -903,6 +858,8 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
 int intel_engine_stop_cs(struct intel_engine_cs *engine);
 void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine);
 
+void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask);
+
 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
 u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
 
@@ -947,102 +904,29 @@ static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
 void intel_engine_get_instdone(struct intel_engine_cs *engine,
                               struct intel_instdone *instdone);
 
-/*
- * Arbitrary size for largest possible 'add request' sequence. The code paths
- * are complex and variable. Empirical measurement shows that the worst case
- * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
- * we need to allocate double the largest single packet within that emission
- * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
- */
-#define MIN_SPACE_FOR_ADD_REQUEST 336
-
-static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
-{
-       return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
-}
-
-static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
-{
-       return engine->status_page.ggtt_offset + I915_GEM_HWS_PREEMPT_ADDR;
-}
-
-/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
-int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
-
-static inline void intel_wait_init(struct intel_wait *wait)
-{
-       wait->tsk = current;
-       wait->request = NULL;
-}
-
-static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
-{
-       wait->tsk = current;
-       wait->seqno = seqno;
-}
-
-static inline bool intel_wait_has_seqno(const struct intel_wait *wait)
-{
-       return wait->seqno;
-}
-
-static inline bool
-intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
-{
-       wait->seqno = seqno;
-       return intel_wait_has_seqno(wait);
-}
-
-static inline bool
-intel_wait_update_request(struct intel_wait *wait,
-                         const struct i915_request *rq)
-{
-       return intel_wait_update_seqno(wait, i915_request_global_seqno(rq));
-}
-
-static inline bool
-intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
-{
-       return wait->seqno == seqno;
-}
-
-static inline bool
-intel_wait_check_request(const struct intel_wait *wait,
-                        const struct i915_request *rq)
-{
-       return intel_wait_check_seqno(wait, i915_request_global_seqno(rq));
-}
+void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
+void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
 
-static inline bool intel_wait_complete(const struct intel_wait *wait)
-{
-       return RB_EMPTY_NODE(&wait->node);
-}
+void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
+void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);
 
-bool intel_engine_add_wait(struct intel_engine_cs *engine,
-                          struct intel_wait *wait);
-void intel_engine_remove_wait(struct intel_engine_cs *engine,
-                             struct intel_wait *wait);
-bool intel_engine_enable_signaling(struct i915_request *request, bool wakeup);
-void intel_engine_cancel_signaling(struct i915_request *request);
+bool intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine);
+void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
 
-static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
+static inline void
+intel_engine_queue_breadcrumbs(struct intel_engine_cs *engine)
 {
-       return READ_ONCE(engine->breadcrumbs.irq_wait);
+       irq_work_queue(&engine->breadcrumbs.irq_work);
 }
 
-unsigned int intel_engine_wakeup(struct intel_engine_cs *engine);
-#define ENGINE_WAKEUP_WAITER BIT(0)
-#define ENGINE_WAKEUP_ASLEEP BIT(1)
-
-void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
-void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);
-
-void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
-void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
+bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine);
 
 void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
 void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
 
+void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
+                                   struct drm_printer *p);
+
 static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
 {
        memset(batch, 0, 6 * sizeof(u32));
@@ -1055,7 +939,7 @@ static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
 }
 
 static inline u32 *
-gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset)
+gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
 {
        /* We're using qword write, offset should be aligned to 8 bytes. */
        GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
@@ -1065,8 +949,7 @@ gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset)
         * following the batch.
         */
        *cs++ = GFX_OP_PIPE_CONTROL(6);
-       *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
-               PIPE_CONTROL_QW_WRITE;
+       *cs++ = flags | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_GLOBAL_GTT_IVB;
        *cs++ = gtt_offset;
        *cs++ = 0;
        *cs++ = value;
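
With the extra flags parameter, the caller now chooses the PIPE_CONTROL bits
(PIPE_CONTROL_CS_STALL used to be hard-coded in this helper). An illustrative
call-site fragment; the flag choice is an example and seqno/gtt_offset are
assumed locals of the emitter, not taken from this patch:

/* Sketch: emit a seqno write with an explicit CS stall. */
cs = gen8_emit_ggtt_write_rcs(cs, seqno, gtt_offset,
			      PIPE_CONTROL_CS_STALL);
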
@@ -1092,7 +975,14 @@ gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
        return cs;
 }
 
-void intel_engines_sanitize(struct drm_i915_private *i915);
+static inline void intel_engine_reset(struct intel_engine_cs *engine,
+                                     bool stalled)
+{
+       if (engine->reset.reset)
+               engine->reset.reset(engine, stalled);
+}
+
+void intel_engines_sanitize(struct drm_i915_private *i915, bool force);
 
 bool intel_engine_is_idle(struct intel_engine_cs *engine);
 bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
drivers/gpu/drm/i915/intel_runtime_pm.c
index 4350a5270423c1b924a5e69a75ea0ff27b0ec31f..a017a4232c0fae4580da8b0a59bf54a9ba7fdecb 100644 (file)
@@ -29,6 +29,8 @@
 #include <linux/pm_runtime.h>
 #include <linux/vgaarb.h>
 
+#include <drm/drm_print.h>
+
 #include "i915_drv.h"
 #include "intel_drv.h"
 
  * present for a given platform.
  */
 
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+
+#include <linux/sort.h>
+
+#define STACKDEPTH 8
+
+static noinline depot_stack_handle_t __save_depot_stack(void)
+{
+       unsigned long entries[STACKDEPTH];
+       struct stack_trace trace = {
+               .entries = entries,
+               .max_entries = ARRAY_SIZE(entries),
+               .skip = 1,
+       };
+
+       save_stack_trace(&trace);
+       if (trace.nr_entries &&
+           trace.entries[trace.nr_entries - 1] == ULONG_MAX)
+               trace.nr_entries--;
+
+       return depot_save_stack(&trace, GFP_NOWAIT | __GFP_NOWARN);
+}
+
+static void __print_depot_stack(depot_stack_handle_t stack,
+                               char *buf, int sz, int indent)
+{
+       unsigned long entries[STACKDEPTH];
+       struct stack_trace trace = {
+               .entries = entries,
+               .max_entries = ARRAY_SIZE(entries),
+       };
+
+       depot_fetch_stack(stack, &trace);
+       snprint_stack_trace(buf, sz, &trace, indent);
+}
+
+static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+       struct i915_runtime_pm *rpm = &i915->runtime_pm;
+
+       spin_lock_init(&rpm->debug.lock);
+}
+
+static noinline depot_stack_handle_t
+track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+       struct i915_runtime_pm *rpm = &i915->runtime_pm;
+       depot_stack_handle_t stack, *stacks;
+       unsigned long flags;
+
+       atomic_inc(&rpm->wakeref_count);
+       assert_rpm_wakelock_held(i915);
+
+       if (!HAS_RUNTIME_PM(i915))
+               return -1;
+
+       stack = __save_depot_stack();
+       if (!stack)
+               return -1;
+
+       spin_lock_irqsave(&rpm->debug.lock, flags);
+
+       if (!rpm->debug.count)
+               rpm->debug.last_acquire = stack;
+
+       stacks = krealloc(rpm->debug.owners,
+                         (rpm->debug.count + 1) * sizeof(*stacks),
+                         GFP_NOWAIT | __GFP_NOWARN);
+       if (stacks) {
+               stacks[rpm->debug.count++] = stack;
+               rpm->debug.owners = stacks;
+       } else {
+               stack = -1;
+       }
+
+       spin_unlock_irqrestore(&rpm->debug.lock, flags);
+
+       return stack;
+}
+
+static void cancel_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
+                                           depot_stack_handle_t stack)
+{
+       struct i915_runtime_pm *rpm = &i915->runtime_pm;
+       unsigned long flags, n;
+       bool found = false;
+
+       if (unlikely(stack == -1))
+               return;
+
+       spin_lock_irqsave(&rpm->debug.lock, flags);
+       for (n = rpm->debug.count; n--; ) {
+               if (rpm->debug.owners[n] == stack) {
+                       memmove(rpm->debug.owners + n,
+                               rpm->debug.owners + n + 1,
+                               (--rpm->debug.count - n) * sizeof(stack));
+                       found = true;
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&rpm->debug.lock, flags);
+
+       if (WARN(!found,
+                "Unmatched wakeref (tracking %lu), count %u\n",
+                rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
+               char *buf;
+
+               buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+               if (!buf)
+                       return;
+
+               __print_depot_stack(stack, buf, PAGE_SIZE, 2);
+               DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);
+
+               stack = READ_ONCE(rpm->debug.last_release);
+               if (stack) {
+                       __print_depot_stack(stack, buf, PAGE_SIZE, 2);
+                       DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
+               }
+
+               kfree(buf);
+       }
+}
+
+static int cmphandle(const void *_a, const void *_b)
+{
+       const depot_stack_handle_t * const a = _a, * const b = _b;
+
+       if (*a < *b)
+               return -1;
+       else if (*a > *b)
+               return 1;
+       else
+               return 0;
+}
+
+static void
+__print_intel_runtime_pm_wakeref(struct drm_printer *p,
+                                const struct intel_runtime_pm_debug *dbg)
+{
+       unsigned long i;
+       char *buf;
+
+       buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+       if (!buf)
+               return;
+
+       if (dbg->last_acquire) {
+               __print_depot_stack(dbg->last_acquire, buf, PAGE_SIZE, 2);
+               drm_printf(p, "Wakeref last acquired:\n%s", buf);
+       }
+
+       if (dbg->last_release) {
+               __print_depot_stack(dbg->last_release, buf, PAGE_SIZE, 2);
+               drm_printf(p, "Wakeref last released:\n%s", buf);
+       }
+
+       drm_printf(p, "Wakeref count: %lu\n", dbg->count);
+
+       sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL);
+
+       for (i = 0; i < dbg->count; i++) {
+               depot_stack_handle_t stack = dbg->owners[i];
+               unsigned long rep;
+
+               rep = 1;
+               while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
+                       rep++, i++;
+               __print_depot_stack(stack, buf, PAGE_SIZE, 2);
+               drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
+       }
+
+       kfree(buf);
+}
+
+static noinline void
+untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+       struct i915_runtime_pm *rpm = &i915->runtime_pm;
+       struct intel_runtime_pm_debug dbg = {};
+       struct drm_printer p;
+       unsigned long flags;
+
+       assert_rpm_wakelock_held(i915);
+       if (atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
+                                       &rpm->debug.lock,
+                                       flags)) {
+               dbg = rpm->debug;
+
+               rpm->debug.owners = NULL;
+               rpm->debug.count = 0;
+               rpm->debug.last_release = __save_depot_stack();
+
+               spin_unlock_irqrestore(&rpm->debug.lock, flags);
+       }
+       if (!dbg.count)
+               return;
+
+       p = drm_debug_printer("i915");
+       __print_intel_runtime_pm_wakeref(&p, &dbg);
+
+       kfree(dbg.owners);
+}
+
+void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
+                                   struct drm_printer *p)
+{
+       struct intel_runtime_pm_debug dbg = {};
+
+       do {
+               struct i915_runtime_pm *rpm = &i915->runtime_pm;
+               unsigned long alloc = dbg.count;
+               depot_stack_handle_t *s;
+
+               spin_lock_irq(&rpm->debug.lock);
+               dbg.count = rpm->debug.count;
+               if (dbg.count <= alloc) {
+                       memcpy(dbg.owners,
+                              rpm->debug.owners,
+                              dbg.count * sizeof(*s));
+               }
+               dbg.last_acquire = rpm->debug.last_acquire;
+               dbg.last_release = rpm->debug.last_release;
+               spin_unlock_irq(&rpm->debug.lock);
+               if (dbg.count <= alloc)
+                       break;
+
+               s = krealloc(dbg.owners, dbg.count * sizeof(*s), GFP_KERNEL);
+               if (!s)
+                       goto out;
+
+               dbg.owners = s;
+       } while (1);
+
+       __print_intel_runtime_pm_wakeref(p, &dbg);
+
+out:
+       kfree(dbg.owners);
+}
+
+#else
+
+static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+}
+
+static depot_stack_handle_t
+track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+       atomic_inc(&i915->runtime_pm.wakeref_count);
+       assert_rpm_wakelock_held(i915);
+       return -1;
+}
+
+static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+       assert_rpm_wakelock_held(i915);
+       atomic_dec(&i915->runtime_pm.wakeref_count);
+}
+
+#endif
+
 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
                                         enum i915_power_well_id power_well_id);
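
The net effect of the tracking above: every runtime-pm "get" now hands back a
cookie that the matching "put" consumes, so the debug build can pinpoint the
stack that leaked a reference. A sketch of the calling convention (the
function and its body are illustrative):

static void example_user(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(i915);	/* records a depot stack */
	/* ... touch the hardware ... */
	intel_runtime_pm_put(i915, wakeref);	/* checks the cookie off */
}
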
 
@@ -509,7 +773,7 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
         * BIOS's own request bits, which are forced-on for these power wells
         * when exiting DC5/6.
         */
-       if (IS_GEN9(dev_priv) && !IS_GEN9_LP(dev_priv) &&
+       if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
            (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
                val |= I915_READ(regs->bios);
 
@@ -639,10 +903,10 @@ void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
  * back on and register state is restored. This is guaranteed by the MMIO write
  * to DC_STATE_EN blocking until the state is restored.
  */
-static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
+static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
 {
-       uint32_t val;
-       uint32_t mask;
+       u32 val;
+       u32 mask;
 
        if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
                state &= dev_priv->csr.allowed_dc_mask;
@@ -1274,7 +1538,7 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
 {
        enum dpio_phy phy;
        enum pipe pipe;
-       uint32_t tmp;
+       u32 tmp;
 
        WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
                     power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
@@ -1591,18 +1855,19 @@ __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
  * Any power domain reference obtained by this function must have a symmetric
  * call to intel_display_power_put() to release the reference again.
  */
-void intel_display_power_get(struct drm_i915_private *dev_priv,
-                            enum intel_display_power_domain domain)
+intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
+                                       enum intel_display_power_domain domain)
 {
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
-
-       intel_runtime_pm_get(dev_priv);
+       intel_wakeref_t wakeref = intel_runtime_pm_get(dev_priv);
 
        mutex_lock(&power_domains->lock);
 
        __intel_display_power_get_domain(dev_priv, domain);
 
        mutex_unlock(&power_domains->lock);
+
+       return wakeref;
 }
 
 /**
@@ -1617,13 +1882,16 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
  * Any power domain reference obtained by this function must have a symmetric
  * call to intel_display_power_put() to release the reference again.
  */
-bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
-                                       enum intel_display_power_domain domain)
+intel_wakeref_t
+intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+                                  enum intel_display_power_domain domain)
 {
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       intel_wakeref_t wakeref;
        bool is_enabled;
 
-       if (!intel_runtime_pm_get_if_in_use(dev_priv))
+       wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
+       if (!wakeref)
                return false;
 
        mutex_lock(&power_domains->lock);
@@ -1637,23 +1905,16 @@ bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
 
        mutex_unlock(&power_domains->lock);
 
-       if (!is_enabled)
-               intel_runtime_pm_put(dev_priv);
+       if (!is_enabled) {
+               intel_runtime_pm_put(dev_priv, wakeref);
+               wakeref = 0;
+       }
 
-       return is_enabled;
+       return wakeref;
 }
 
-/**
- * intel_display_power_put - release a power domain reference
- * @dev_priv: i915 device instance
- * @domain: power domain to reference
- *
- * This function drops the power domain reference obtained by
- * intel_display_power_get() and might power down the corresponding hardware
- * block right away if this is the last reference.
- */
-void intel_display_power_put(struct drm_i915_private *dev_priv,
-                            enum intel_display_power_domain domain)
+static void __intel_display_power_put(struct drm_i915_private *dev_priv,
+                                     enum intel_display_power_domain domain)
 {
        struct i915_power_domains *power_domains;
        struct i915_power_well *power_well;
@@ -1671,9 +1932,33 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
                intel_power_well_put(dev_priv, power_well);
 
        mutex_unlock(&power_domains->lock);
+}
+
+/**
+ * intel_display_power_put_unchecked - release a power domain reference
+ * @dev_priv: i915 device instance
+ * @domain: power domain to put the reference for
+ *
+ * This function drops the power domain reference obtained by
+ * intel_display_power_get() and might power down the corresponding hardware
+ * block right away if this is the last reference.
+ */
+void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
+                                      enum intel_display_power_domain domain)
+{
+       __intel_display_power_put(dev_priv, domain);
+       intel_runtime_pm_put_unchecked(dev_priv);
+}
 
-       intel_runtime_pm_put(dev_priv);
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+void intel_display_power_put(struct drm_i915_private *dev_priv,
+                            enum intel_display_power_domain domain,
+                            intel_wakeref_t wakeref)
+{
+       __intel_display_power_put(dev_priv, domain);
+       intel_runtime_pm_put(dev_priv, wakeref);
 }
+#endif
 
 #define I830_PIPES_POWER_DOMAINS (             \
        BIT_ULL(POWER_DOMAIN_PIPE_A) |          \
@@ -3043,10 +3328,10 @@ sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
        return 1;
 }
 
-static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
-                                   int enable_dc)
+static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
+                              int enable_dc)
 {
-       uint32_t mask;
+       u32 mask;
        int requested_dc;
        int max_dc;
 
@@ -3058,7 +3343,7 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
                 * suspend/resume, so allow it unconditionally.
                 */
                mask = DC_STATE_EN_DC9;
-       } else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv)) {
+       } else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
                max_dc = 2;
                mask = 0;
        } else if (IS_GEN9_LP(dev_priv)) {
@@ -3311,7 +3596,7 @@ static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
 
 static void icl_mbus_init(struct drm_i915_private *dev_priv)
 {
-       uint32_t val;
+       u32 val;
 
        val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
              MBUS_ABOX_BT_CREDIT_POOL2(16) |
@@ -3622,7 +3907,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
         * current lane status.
         */
        if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
-               uint32_t status = I915_READ(DPLL(PIPE_A));
+               u32 status = I915_READ(DPLL(PIPE_A));
                unsigned int mask;
 
                mask = status & DPLL_PORTB_READY_MASK;
@@ -3653,7 +3938,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
        }
 
        if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
-               uint32_t status = I915_READ(DPIO_PHY_STATUS);
+               u32 status = I915_READ(DPIO_PHY_STATUS);
                unsigned int mask;
 
                mask = status & DPLL_PORTD_READY_MASK;
@@ -3712,7 +3997,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
 
 /**
  * intel_power_domains_init_hw - initialize hardware power domain state
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
  * @resume: Called from resume code paths or not
  *
  * This function initializes the hardware power domain state and enables all
@@ -3726,30 +4011,31 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
  * intel_power_domains_enable()) and must be paired with
  * intel_power_domains_fini_hw().
  */
-void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
+void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
 {
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_domains *power_domains = &i915->power_domains;
 
        power_domains->initializing = true;
 
-       if (IS_ICELAKE(dev_priv)) {
-               icl_display_core_init(dev_priv, resume);
-       } else if (IS_CANNONLAKE(dev_priv)) {
-               cnl_display_core_init(dev_priv, resume);
-       } else if (IS_GEN9_BC(dev_priv)) {
-               skl_display_core_init(dev_priv, resume);
-       } else if (IS_GEN9_LP(dev_priv)) {
-               bxt_display_core_init(dev_priv, resume);
-       } else if (IS_CHERRYVIEW(dev_priv)) {
+       if (IS_ICELAKE(i915)) {
+               icl_display_core_init(i915, resume);
+       } else if (IS_CANNONLAKE(i915)) {
+               cnl_display_core_init(i915, resume);
+       } else if (IS_GEN9_BC(i915)) {
+               skl_display_core_init(i915, resume);
+       } else if (IS_GEN9_LP(i915)) {
+               bxt_display_core_init(i915, resume);
+       } else if (IS_CHERRYVIEW(i915)) {
                mutex_lock(&power_domains->lock);
-               chv_phy_control_init(dev_priv);
+               chv_phy_control_init(i915);
                mutex_unlock(&power_domains->lock);
-       } else if (IS_VALLEYVIEW(dev_priv)) {
+       } else if (IS_VALLEYVIEW(i915)) {
                mutex_lock(&power_domains->lock);
-               vlv_cmnlane_wa(dev_priv);
+               vlv_cmnlane_wa(i915);
                mutex_unlock(&power_domains->lock);
-       } else if (IS_IVYBRIDGE(dev_priv) || INTEL_GEN(dev_priv) >= 7)
-               intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
+       } else if (IS_IVYBRIDGE(i915) || INTEL_GEN(i915) >= 7) {
+               intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
+       }
 
        /*
         * Keep all power wells enabled for any dependent HW access during
@@ -3757,18 +4043,20 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
         * resources powered until display HW readout is complete. We drop
         * this reference in intel_power_domains_enable().
         */
-       intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+       power_domains->wakeref =
+               intel_display_power_get(i915, POWER_DOMAIN_INIT);
+
        /* Disable power well support if the user asked for it. */
        if (!i915_modparams.disable_power_well)
-               intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
-       intel_power_domains_sync_hw(dev_priv);
+               intel_display_power_get(i915, POWER_DOMAIN_INIT);
+       intel_power_domains_sync_hw(i915);
 
        power_domains->initializing = false;
 }
 
 /**
  * intel_power_domains_fini_hw - deinitialize hw power domain state
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
  *
  * De-initializes the display power domain HW state. It also ensures that the
  * device stays powered up so that the driver can be reloaded.
@@ -3777,21 +4065,24 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
  * intel_power_domains_disable()) and must be paired with
  * intel_power_domains_init_hw().
  */
-void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv)
+void intel_power_domains_fini_hw(struct drm_i915_private *i915)
 {
-       /* Keep the power well enabled, but cancel its rpm wakeref. */
-       intel_runtime_pm_put(dev_priv);
+       intel_wakeref_t wakeref __maybe_unused =
+               fetch_and_zero(&i915->power_domains.wakeref);
 
        /* Remove the refcount we took to keep power well support disabled. */
        if (!i915_modparams.disable_power_well)
-               intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+               intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
+
+       intel_power_domains_verify_state(i915);
 
-       intel_power_domains_verify_state(dev_priv);
+       /* Keep the power well enabled, but cancel its rpm wakeref. */
+       intel_runtime_pm_put(i915, wakeref);
 }
 
 /**
  * intel_power_domains_enable - enable toggling of display power wells
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
  *
 * Enable on-demand enabling/disabling of the display power wells. Note that
  * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
@@ -3801,30 +4092,36 @@ void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv)
  * of display HW readout (which will acquire the power references reflecting
  * the current HW state).
  */
-void intel_power_domains_enable(struct drm_i915_private *dev_priv)
+void intel_power_domains_enable(struct drm_i915_private *i915)
 {
-       intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+       intel_wakeref_t wakeref __maybe_unused =
+               fetch_and_zero(&i915->power_domains.wakeref);
 
-       intel_power_domains_verify_state(dev_priv);
+       intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
+       intel_power_domains_verify_state(i915);
 }
 
 /**
  * intel_power_domains_disable - disable toggling of display power wells
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
  *
 * Disable on-demand enabling/disabling of the display power wells. See
  * intel_power_domains_enable() for which power wells this call controls.
  */
-void intel_power_domains_disable(struct drm_i915_private *dev_priv)
+void intel_power_domains_disable(struct drm_i915_private *i915)
 {
-       intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+       struct i915_power_domains *power_domains = &i915->power_domains;
 
-       intel_power_domains_verify_state(dev_priv);
+       WARN_ON(power_domains->wakeref);
+       power_domains->wakeref =
+               intel_display_power_get(i915, POWER_DOMAIN_INIT);
+
+       intel_power_domains_verify_state(i915);
 }
 
 /**
  * intel_power_domains_suspend - suspend power domain state
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
  * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
  *
  * This function prepares the hardware power domain state before entering
@@ -3833,12 +4130,14 @@ void intel_power_domains_disable(struct drm_i915_private *dev_priv)
  * It must be called with power domains already disabled (after a call to
  * intel_power_domains_disable()) and paired with intel_power_domains_resume().
  */
-void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
+void intel_power_domains_suspend(struct drm_i915_private *i915,
                                 enum i915_drm_suspend_mode suspend_mode)
 {
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_domains *power_domains = &i915->power_domains;
+       intel_wakeref_t wakeref __maybe_unused =
+               fetch_and_zero(&power_domains->wakeref);
 
-       intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+       intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
 
        /*
         * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
@@ -3847,10 +4146,10 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
         * resources as required and also enable deeper system power states
         * that would be blocked if the firmware was inactive.
         */
-       if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
+       if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
            suspend_mode == I915_DRM_SUSPEND_IDLE &&
-           dev_priv->csr.dmc_payload != NULL) {
-               intel_power_domains_verify_state(dev_priv);
+           i915->csr.dmc_payload) {
+               intel_power_domains_verify_state(i915);
                return;
        }
 
@@ -3859,25 +4158,25 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
         * power wells if power domains must be deinitialized for suspend.
         */
        if (!i915_modparams.disable_power_well) {
-               intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
-               intel_power_domains_verify_state(dev_priv);
+               intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
+               intel_power_domains_verify_state(i915);
        }
 
-       if (IS_ICELAKE(dev_priv))
-               icl_display_core_uninit(dev_priv);
-       else if (IS_CANNONLAKE(dev_priv))
-               cnl_display_core_uninit(dev_priv);
-       else if (IS_GEN9_BC(dev_priv))
-               skl_display_core_uninit(dev_priv);
-       else if (IS_GEN9_LP(dev_priv))
-               bxt_display_core_uninit(dev_priv);
+       if (IS_ICELAKE(i915))
+               icl_display_core_uninit(i915);
+       else if (IS_CANNONLAKE(i915))
+               cnl_display_core_uninit(i915);
+       else if (IS_GEN9_BC(i915))
+               skl_display_core_uninit(i915);
+       else if (IS_GEN9_LP(i915))
+               bxt_display_core_uninit(i915);
 
        power_domains->display_core_suspended = true;
 }
 
 /**
  * intel_power_domains_resume - resume power domain state
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
  *
 * This function resumes the hardware power domain state during system resume.
  *
@@ -3885,28 +4184,30 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
  * intel_power_domains_enable()) and must be paired with
  * intel_power_domains_suspend().
  */
-void intel_power_domains_resume(struct drm_i915_private *dev_priv)
+void intel_power_domains_resume(struct drm_i915_private *i915)
 {
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_domains *power_domains = &i915->power_domains;
 
        if (power_domains->display_core_suspended) {
-               intel_power_domains_init_hw(dev_priv, true);
+               intel_power_domains_init_hw(i915, true);
                power_domains->display_core_suspended = false;
        } else {
-               intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+               WARN_ON(power_domains->wakeref);
+               power_domains->wakeref =
+                       intel_display_power_get(i915, POWER_DOMAIN_INIT);
        }
 
-       intel_power_domains_verify_state(dev_priv);
+       intel_power_domains_verify_state(i915);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
 
-static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
+static void intel_power_domains_dump_info(struct drm_i915_private *i915)
 {
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_domains *power_domains = &i915->power_domains;
        struct i915_power_well *power_well;
 
-       for_each_power_well(dev_priv, power_well) {
+       for_each_power_well(i915, power_well) {
                enum intel_display_power_domain domain;
 
                DRM_DEBUG_DRIVER("%-25s %d\n",
@@ -3921,7 +4222,7 @@ static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
 
 /**
  * intel_power_domains_verify_state - verify the HW/SW state for all power wells
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
  *
  * Verify if the reference count of each power well matches its HW enabled
  * state and the total refcount of the domains it belongs to. This must be
@@ -3929,22 +4230,21 @@ static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
  * acquiring reference counts for any power wells in use and disabling the
  * ones left on by BIOS but not required by any active output.
  */
-static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
+static void intel_power_domains_verify_state(struct drm_i915_private *i915)
 {
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_domains *power_domains = &i915->power_domains;
        struct i915_power_well *power_well;
        bool dump_domain_info;
 
        mutex_lock(&power_domains->lock);
 
        dump_domain_info = false;
-       for_each_power_well(dev_priv, power_well) {
+       for_each_power_well(i915, power_well) {
                enum intel_display_power_domain domain;
                int domains_count;
                bool enabled;
 
-               enabled = power_well->desc->ops->is_enabled(dev_priv,
-                                                           power_well);
+               enabled = power_well->desc->ops->is_enabled(i915, power_well);
                if ((power_well->count || power_well->desc->always_on) !=
                    enabled)
                        DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
@@ -3968,7 +4268,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
                static bool dumped;
 
                if (!dumped) {
-                       intel_power_domains_dump_info(dev_priv);
+                       intel_power_domains_dump_info(i915);
                        dumped = true;
                }
        }
@@ -3978,7 +4278,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
 
 #else
 
-static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
+static void intel_power_domains_verify_state(struct drm_i915_private *i915)
 {
 }
 
@@ -3986,30 +4286,31 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
 
 /**
  * intel_runtime_pm_get - grab a runtime pm reference
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
  *
  * This function grabs a device-level runtime pm reference (mostly used for GEM
  * code to ensure the GTT or GT is on) and ensures that it is powered up.
  *
  * Any runtime pm reference obtained by this function must have a symmetric
  * call to intel_runtime_pm_put() to release the reference again.
+ *
+ * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
  */
-void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
+intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915)
 {
-       struct pci_dev *pdev = dev_priv->drm.pdev;
+       struct pci_dev *pdev = i915->drm.pdev;
        struct device *kdev = &pdev->dev;
        int ret;
 
        ret = pm_runtime_get_sync(kdev);
        WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
 
-       atomic_inc(&dev_priv->runtime_pm.wakeref_count);
-       assert_rpm_wakelock_held(dev_priv);
+       return track_intel_runtime_pm_wakeref(i915);
 }
 
 /**
  * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
  *
  * This function grabs a device-level runtime pm reference if the device is
  * already in use and ensures that it is powered up. It is illegal to try
@@ -4018,12 +4319,13 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
  * Any runtime pm reference obtained by this function must have a symmetric
  * call to intel_runtime_pm_put() to release the reference again.
  *
- * Returns: True if the wakeref was acquired, or False otherwise.
+ * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
+ * as True if the wakeref was acquired, or False otherwise.
  */
-bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
+intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
 {
        if (IS_ENABLED(CONFIG_PM)) {
-               struct pci_dev *pdev = dev_priv->drm.pdev;
+               struct pci_dev *pdev = i915->drm.pdev;
                struct device *kdev = &pdev->dev;
 
                /*
@@ -4033,18 +4335,15 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
                 * atm to the late/early system suspend/resume handlers.
                 */
                if (pm_runtime_get_if_in_use(kdev) <= 0)
-                       return false;
+                       return 0;
        }
 
-       atomic_inc(&dev_priv->runtime_pm.wakeref_count);
-       assert_rpm_wakelock_held(dev_priv);
-
-       return true;
+       return track_intel_runtime_pm_wakeref(i915);
 }
 
 /**
  * intel_runtime_pm_get_noresume - grab a runtime pm reference
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
  *
  * This function grabs a device-level runtime pm reference (mostly used for GEM
  * code to ensure the GTT or GT is on).
@@ -4058,41 +4357,50 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
  *
  * Any runtime pm reference obtained by this function must have a symmetric
  * call to intel_runtime_pm_put() to release the reference again.
+ *
+ * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
  */
-void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
+intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
 {
-       struct pci_dev *pdev = dev_priv->drm.pdev;
+       struct pci_dev *pdev = i915->drm.pdev;
        struct device *kdev = &pdev->dev;
 
-       assert_rpm_wakelock_held(dev_priv);
+       assert_rpm_wakelock_held(i915);
        pm_runtime_get_noresume(kdev);
 
-       atomic_inc(&dev_priv->runtime_pm.wakeref_count);
+       return track_intel_runtime_pm_wakeref(i915);
 }
 
 /**
  * intel_runtime_pm_put - release a runtime pm reference
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
  *
  * This function drops the device-level runtime pm reference obtained by
  * intel_runtime_pm_get() and might power down the corresponding
  * hardware block right away if this is the last reference.
  */
-void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
+void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915)
 {
-       struct pci_dev *pdev = dev_priv->drm.pdev;
+       struct pci_dev *pdev = i915->drm.pdev;
        struct device *kdev = &pdev->dev;
 
-       assert_rpm_wakelock_held(dev_priv);
-       atomic_dec(&dev_priv->runtime_pm.wakeref_count);
+       untrack_intel_runtime_pm_wakeref(i915);
 
        pm_runtime_mark_last_busy(kdev);
        pm_runtime_put_autosuspend(kdev);
 }
 
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref)
+{
+       cancel_intel_runtime_pm_wakeref(i915, wref);
+       intel_runtime_pm_put_unchecked(i915);
+}
+#endif
+
 /**
  * intel_runtime_pm_enable - enable runtime pm
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
  *
  * This function enables runtime pm at the end of the driver load sequence.
  *
@@ -4100,9 +4408,9 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
  * subordinate display power domains. That is done by
  * intel_power_domains_enable().
  */
-void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
+void intel_runtime_pm_enable(struct drm_i915_private *i915)
 {
-       struct pci_dev *pdev = dev_priv->drm.pdev;
+       struct pci_dev *pdev = i915->drm.pdev;
        struct device *kdev = &pdev->dev;
 
        /*
@@ -4124,7 +4432,7 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
         * so the driver's own RPM reference tracking asserts also work on
         * platforms without RPM support.
         */
-       if (!HAS_RUNTIME_PM(dev_priv)) {
+       if (!HAS_RUNTIME_PM(i915)) {
                int ret;
 
                pm_runtime_dont_use_autosuspend(kdev);
@@ -4142,17 +4450,35 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
        pm_runtime_put_autosuspend(kdev);
 }
 
-void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
+void intel_runtime_pm_disable(struct drm_i915_private *i915)
 {
-       struct pci_dev *pdev = dev_priv->drm.pdev;
+       struct pci_dev *pdev = i915->drm.pdev;
        struct device *kdev = &pdev->dev;
 
        /* Transfer rpm ownership back to core */
-       WARN(pm_runtime_get_sync(&dev_priv->drm.pdev->dev) < 0,
+       WARN(pm_runtime_get_sync(kdev) < 0,
             "Failed to pass rpm ownership back to core\n");
 
        pm_runtime_dont_use_autosuspend(kdev);
 
-       if (!HAS_RUNTIME_PM(dev_priv))
+       if (!HAS_RUNTIME_PM(i915))
                pm_runtime_put(kdev);
 }
+
+void intel_runtime_pm_cleanup(struct drm_i915_private *i915)
+{
+       struct i915_runtime_pm *rpm = &i915->runtime_pm;
+       int count;
+
+       count = atomic_fetch_inc(&rpm->wakeref_count); /* balance untrack */
+       WARN(count,
+            "i915->runtime_pm.wakeref_count=%d on cleanup\n",
+            count);
+
+       untrack_intel_runtime_pm_wakeref(i915);
+}
+
+void intel_runtime_pm_init_early(struct drm_i915_private *i915)
+{
+       init_intel_runtime_pm_wakeref(i915);
+}
drivers/gpu/drm/i915/intel_sdvo.c
index 5805ec1aba122495736167c0f66ee86cd5da249d..e7b0884ba5a57f825a4d3b722e7a9c8b33496551 100644 (file)
@@ -29,7 +29,6 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/export.h>
-#include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
@@ -77,7 +76,7 @@ struct intel_sdvo {
        i915_reg_t sdvo_reg;
 
        /* Active outputs controlled by this SDVO output */
-       uint16_t controlled_output;
+       u16 controlled_output;
 
        /*
         * Capabilities of the SDVO device returned by
@@ -92,33 +91,32 @@ struct intel_sdvo {
        * For multiple function SDVO device,
        * this is for current attached outputs.
        */
-       uint16_t attached_output;
+       u16 attached_output;
 
        /*
         * Hotplug activation bits for this device
         */
-       uint16_t hotplug_active;
+       u16 hotplug_active;
 
        enum port port;
 
        bool has_hdmi_monitor;
        bool has_hdmi_audio;
-       bool rgb_quant_range_selectable;
 
        /* DDC bus used by this SDVO encoder */
-       uint8_t ddc_bus;
+       u8 ddc_bus;
 
        /*
         * the sdvo flag gets lost in round trip: dtd->adjusted_mode->dtd
         */
-       uint8_t dtd_sdvo_flags;
+       u8 dtd_sdvo_flags;
 };
 
 struct intel_sdvo_connector {
        struct intel_connector base;
 
        /* Mark the type of connector */
-       uint16_t output_flag;
+       u16 output_flag;
 
        /* This contains all current supported TV format */
        u8 tv_format_supported[TV_FORMAT_NUM];
@@ -186,7 +184,7 @@ to_intel_sdvo_connector(struct drm_connector *connector)
        container_of((conn_state), struct intel_sdvo_connector_state, base.base)
 
 static bool
-intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags);
+intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags);
 static bool
 intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
                              struct intel_sdvo_connector *intel_sdvo_connector,
@@ -748,9 +746,9 @@ static bool intel_sdvo_get_input_timing(struct intel_sdvo *intel_sdvo,
 static bool
 intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
                                         struct intel_sdvo_connector *intel_sdvo_connector,
-                                        uint16_t clock,
-                                        uint16_t width,
-                                        uint16_t height)
+                                        u16 clock,
+                                        u16 width,
+                                        u16 height)
 {
        struct intel_sdvo_preferred_input_timing_args args;
 
@@ -793,9 +791,9 @@ static bool intel_sdvo_set_clock_rate_mult(struct intel_sdvo *intel_sdvo, u8 val
 static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
                                         const struct drm_display_mode *mode)
 {
-       uint16_t width, height;
-       uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
-       uint16_t h_sync_offset, v_sync_offset;
+       u16 width, height;
+       u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len;
+       u16 h_sync_offset, v_sync_offset;
        int mode_clock;
 
        memset(dtd, 0, sizeof(*dtd));
@@ -900,13 +898,13 @@ static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
 }
 
 static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo,
-                                 uint8_t mode)
+                                 u8 mode)
 {
        return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1);
 }
 
 static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo,
-                                      uint8_t mode)
+                                      u8 mode)
 {
        return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
 }
@@ -915,11 +913,11 @@ static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo,
 static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
 {
        int i, j;
-       uint8_t set_buf_index[2];
-       uint8_t av_split;
-       uint8_t buf_size;
-       uint8_t buf[48];
-       uint8_t *pos;
+       u8 set_buf_index[2];
+       u8 av_split;
+       u8 buf_size;
+       u8 buf[48];
+       u8 *pos;
 
        intel_sdvo_get_value(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1);
 
@@ -942,11 +940,11 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
 #endif
 
 static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
-                                      unsigned if_index, uint8_t tx_rate,
-                                      const uint8_t *data, unsigned length)
+                                      unsigned int if_index, u8 tx_rate,
+                                      const u8 *data, unsigned int length)
 {
-       uint8_t set_buf_index[2] = { if_index, 0 };
-       uint8_t hbuf_size, tmp[8];
+       u8 set_buf_index[2] = { if_index, 0 };
+       u8 hbuf_size, tmp[8];
        int i;
 
        if (!intel_sdvo_set_value(intel_sdvo,
@@ -981,29 +979,30 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
 }
 
 static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
-                                        const struct intel_crtc_state *pipe_config)
+                                        const struct intel_crtc_state *pipe_config,
+                                        const struct drm_connector_state *conn_state)
 {
-       uint8_t sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
+       const struct drm_display_mode *adjusted_mode =
+               &pipe_config->base.adjusted_mode;
+       u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
        union hdmi_infoframe frame;
        int ret;
        ssize_t len;
 
        ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
-                                                      &pipe_config->base.adjusted_mode,
-                                                      false);
+                                                      conn_state->connector,
+                                                      adjusted_mode);
        if (ret < 0) {
                DRM_ERROR("couldn't fill AVI infoframe\n");
                return false;
        }
 
-       if (intel_sdvo->rgb_quant_range_selectable) {
-               if (pipe_config->limited_color_range)
-                       frame.avi.quantization_range =
-                               HDMI_QUANTIZATION_RANGE_LIMITED;
-               else
-                       frame.avi.quantization_range =
-                               HDMI_QUANTIZATION_RANGE_FULL;
-       }
+       drm_hdmi_avi_infoframe_quant_range(&frame.avi,
+                                          conn_state->connector,
+                                          adjusted_mode,
+                                          pipe_config->limited_color_range ?
+                                          HDMI_QUANTIZATION_RANGE_LIMITED :
+                                          HDMI_QUANTIZATION_RANGE_FULL);
 
        len = hdmi_infoframe_pack(&frame, sdvo_data, sizeof(sdvo_data));
        if (len < 0)
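
drm_hdmi_avi_infoframe_quant_range() absorbs the old rgb_quant_range_selectable
bookkeeping: it consults the connector state and only encodes a non-default
range when the sink can accept it. A condensed sketch of the sequence used
above (error handling elided; connector and adjusted_mode are assumed locals):

union hdmi_infoframe frame;
u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];

drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, connector, adjusted_mode);
drm_hdmi_avi_infoframe_quant_range(&frame.avi, connector, adjusted_mode,
				   HDMI_QUANTIZATION_RANGE_FULL);
hdmi_infoframe_pack(&frame, sdvo_data, sizeof(sdvo_data));
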
@@ -1018,7 +1017,7 @@ static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo,
                                     const struct drm_connector_state *conn_state)
 {
        struct intel_sdvo_tv_format format;
-       uint32_t format_map;
+       u32 format_map;
 
        format_map = 1 << conn_state->tv.mode;
        memset(&format, 0, sizeof(format));
@@ -1108,9 +1107,9 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config)
        pipe_config->clock_set = true;
 }
 
-static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
-                                     struct intel_crtc_state *pipe_config,
-                                     struct drm_connector_state *conn_state)
+static int intel_sdvo_compute_config(struct intel_encoder *encoder,
+                                    struct intel_crtc_state *pipe_config,
+                                    struct drm_connector_state *conn_state)
 {
        struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
        struct intel_sdvo_connector_state *intel_sdvo_state =
@@ -1135,7 +1134,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
         */
        if (IS_TV(intel_sdvo_connector)) {
                if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode))
-                       return false;
+                       return -EINVAL;
 
                (void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
                                                           intel_sdvo_connector,
@@ -1145,7 +1144,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
        } else if (IS_LVDS(intel_sdvo_connector)) {
                if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
                                                             intel_sdvo_connector->base.panel.fixed_mode))
-                       return false;
+                       return -EINVAL;
 
                (void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
                                                           intel_sdvo_connector,
@@ -1154,7 +1153,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
        }
 
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
-               return false;
+               return -EINVAL;
 
        /*
         * Make the CRTC code factor in the SDVO pixel multiplier.  The
@@ -1194,7 +1193,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
        if (intel_sdvo_connector->is_hdmi)
                adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio;
 
-       return true;
+       return 0;
 }
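
The compute_config hook now reports failure as a negative errno instead of a
bare bool, so distinct causes can propagate to the caller. An illustrative
caller-side fragment (names assumed, not taken from this patch):

int ret = encoder->compute_config(encoder, pipe_config, conn_state);
if (ret)
	return ret;	/* e.g. -EINVAL instead of an opaque false */
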
 
 #define UPDATE_PROPERTY(input, NAME) \
@@ -1209,7 +1208,7 @@ static void intel_sdvo_update_props(struct intel_sdvo *intel_sdvo,
        const struct drm_connector_state *conn_state = &sdvo_state->base.base;
        struct intel_sdvo_connector *intel_sdvo_conn =
                to_intel_sdvo_connector(conn_state->connector);
-       uint16_t val;
+       u16 val;
 
        if (intel_sdvo_conn->left)
                UPDATE_PROPERTY(sdvo_state->tv.overscan_h, OVERSCAN_H);
@@ -1316,7 +1315,8 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
                intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
                intel_sdvo_set_colorimetry(intel_sdvo,
                                           SDVO_COLORIMETRY_RGB256);
-               intel_sdvo_set_avi_infoframe(intel_sdvo, crtc_state);
+               intel_sdvo_set_avi_infoframe(intel_sdvo,
+                                            crtc_state, conn_state);
        } else
                intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
 
@@ -1692,10 +1692,10 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
        return true;
 }
 
-static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
+static u16 intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
 {
        struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
-       uint16_t hotplug;
+       u16 hotplug;
 
        if (!I915_HAS_HOTPLUG(dev_priv))
                return 0;
@@ -1802,8 +1802,6 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
                        if (intel_sdvo_connector->is_hdmi) {
                                intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
                                intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
-                               intel_sdvo->rgb_quant_range_selectable =
-                                       drm_rgb_quant_range_selectable(edid);
                        }
                } else
                        status = connector_status_disconnected;
@@ -1828,7 +1826,7 @@ intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
 static enum drm_connector_status
 intel_sdvo_detect(struct drm_connector *connector, bool force)
 {
-       uint16_t response;
+       u16 response;
        struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
        struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
        enum drm_connector_status ret;
@@ -1852,7 +1850,6 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
 
        intel_sdvo->has_hdmi_monitor = false;
        intel_sdvo->has_hdmi_audio = false;
-       intel_sdvo->rgb_quant_range_selectable = false;
 
        if ((intel_sdvo_connector->output_flag & response) == 0)
                ret = connector_status_disconnected;
@@ -1980,7 +1977,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
        struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
        const struct drm_connector_state *conn_state = connector->state;
        struct intel_sdvo_sdtv_resolution_request tv_res;
-       uint32_t reply = 0, format_map = 0;
+       u32 reply = 0, format_map = 0;
        int i;
 
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
@@ -2065,7 +2062,7 @@ static int
 intel_sdvo_connector_atomic_get_property(struct drm_connector *connector,
                                         const struct drm_connector_state *state,
                                         struct drm_property *property,
-                                        uint64_t *val)
+                                        u64 *val)
 {
        struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
        const struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state((void *)state);
@@ -2124,7 +2121,7 @@ static int
 intel_sdvo_connector_atomic_set_property(struct drm_connector *connector,
                                         struct drm_connector_state *state,
                                         struct drm_property *property,
-                                        uint64_t val)
+                                        u64 val)
 {
        struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
        struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(state);
@@ -2273,7 +2270,7 @@ static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
 static void
 intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
 {
-       uint16_t mask = 0;
+       u16 mask = 0;
        unsigned int num_bits;
 
        /*
@@ -2674,7 +2671,7 @@ err:
 }
 
 static bool
-intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
+intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags)
 {
        /* SDVO requires that function XXX1 may not exist unless function XXX0 exists. */
 
@@ -2750,7 +2747,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
 {
        struct drm_device *dev = intel_sdvo->base.base.dev;
        struct intel_sdvo_tv_format format;
-       uint32_t format_map, i;
+       u32 format_map, i;
 
        if (!intel_sdvo_set_target_output(intel_sdvo, type))
                return false;
@@ -2817,7 +2814,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
        struct drm_connector_state *conn_state = connector->state;
        struct intel_sdvo_connector_state *sdvo_state =
                to_intel_sdvo_connector_state(conn_state);
-       uint16_t response, data_value[2];
+       u16 response, data_value[2];
 
        /* when horizontal overscan is supported, add the left/right property */
        if (enhancements.overscan_h) {
@@ -2928,7 +2925,7 @@ intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo,
 {
        struct drm_device *dev = intel_sdvo->base.base.dev;
        struct drm_connector *connector = &intel_sdvo_connector->base.base;
-       uint16_t response, data_value[2];
+       u16 response, data_value[2];
 
        ENHANCEMENT(&connector->state->tv, brightness, BRIGHTNESS);
 
@@ -2942,7 +2939,7 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
 {
        union {
                struct intel_sdvo_enhancements_reply reply;
-               uint16_t response;
+               u16 response;
        } enhancements;
 
        BUILD_BUG_ON(sizeof(enhancements) != 2);
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index d2e003d8f3dbe64247cac7de9baf6a6fcaa0c986..b56a1a9ad01d2e724715503b545d4396f56f7194 100644
@@ -29,7 +29,6 @@
  * registers; newer ones are much simpler and we can use the new DRM plane
  * support.
  */
-#include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_fourcc.h>
@@ -322,8 +321,8 @@ skl_program_scaler(struct intel_plane *plane,
                &crtc_state->scaler_state.scalers[scaler_id];
        int crtc_x = plane_state->base.dst.x1;
        int crtc_y = plane_state->base.dst.y1;
-       uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
-       uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
+       u32 crtc_w = drm_rect_width(&plane_state->base.dst);
+       u32 crtc_h = drm_rect_height(&plane_state->base.dst);
        u16 y_hphase, uv_rgb_hphase;
        u16 y_vphase, uv_rgb_vphase;
        int hscale, vscale;
@@ -478,23 +477,30 @@ skl_program_plane(struct intel_plane *plane,
        u32 aux_stride = skl_plane_stride(plane_state, 1);
        int crtc_x = plane_state->base.dst.x1;
        int crtc_y = plane_state->base.dst.y1;
-       uint32_t x = plane_state->color_plane[color_plane].x;
-       uint32_t y = plane_state->color_plane[color_plane].y;
-       uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
-       uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
+       u32 x = plane_state->color_plane[color_plane].x;
+       u32 y = plane_state->color_plane[color_plane].y;
+       u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
+       u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
        struct intel_plane *linked = plane_state->linked_plane;
        const struct drm_framebuffer *fb = plane_state->base.fb;
        u8 alpha = plane_state->base.alpha >> 8;
+       u32 plane_color_ctl = 0;
        unsigned long irqflags;
        u32 keymsk, keymax;
 
+       plane_ctl |= skl_plane_ctl_crtc(crtc_state);
+
+       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+               plane_color_ctl = plane_state->color_ctl |
+                       glk_plane_color_ctl_crtc(crtc_state);
+
        /* Sizes are 0 based */
        src_w--;
        src_h--;
 
        keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha);
 
-       keymsk = key->channel_mask & 0x3ffffff;
+       keymsk = key->channel_mask & 0x7ffffff;
        if (alpha < 0xff)
                keymsk |= PLANE_KEYMSK_ALPHA_ENABLE;
 
@@ -534,8 +540,7 @@ skl_program_plane(struct intel_plane *plane,
        }
 
        if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
-               I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
-                             plane_state->color_ctl);
+               I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl);
 
        if (fb->format->is_yuv && icl_is_hdr_plane(plane))
                icl_program_input_csc(plane, crtc_state, plane_state);
@@ -619,17 +624,19 @@ skl_plane_get_hw_state(struct intel_plane *plane,
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum intel_display_power_domain power_domain;
        enum plane_id plane_id = plane->id;
+       intel_wakeref_t wakeref;
        bool ret;
 
        power_domain = POWER_DOMAIN_PIPE(plane->pipe);
-       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+       if (!wakeref)
                return false;
 
        ret = I915_READ(PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE;
 
        *pipe = plane->pipe;
 
-       intel_display_power_put(dev_priv, power_domain);
+       intel_display_power_put(dev_priv, power_domain, wakeref);
 
        return ret;
 }
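
In the get_hw_state() hunks here and below, intel_display_power_get_if_enabled() now returns an opaque intel_wakeref_t cookie (zero meaning the power well was off), and the same cookie must be handed back to intel_display_power_put() so every put can be paired with its get for tracking. A stand-alone sketch of the cookie pattern, with simplified, made-up types:

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long wakeref_t;        /* opaque cookie, 0 == not held */

static wakeref_t power_get_if_enabled(void)
{
        static unsigned long next = 1;

        return next++;                  /* hand out a unique cookie */
}

static void power_put(wakeref_t wakeref)
{
        /* a real implementation would match this against the get */
        printf("released wakeref %lu\n", wakeref);
}

static bool plane_get_hw_state(void)
{
        wakeref_t wakeref = power_get_if_enabled();
        bool ret;

        if (!wakeref)
                return false;           /* power well is off */

        ret = true;                     /* read the hardware here */
        power_put(wakeref);             /* pair the put with this get */
        return ret;
}

int main(void)
{
        printf("enabled: %d\n", plane_get_hw_state());
        return 0;
}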
@@ -732,6 +739,11 @@ vlv_update_clrc(const struct intel_plane_state *plane_state)
                      SP_SH_SIN(sh_sin) | SP_SH_COS(sh_cos));
 }
 
+static u32 vlv_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+       return SP_GAMMA_ENABLE;
+}
+
 static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
                          const struct intel_plane_state *plane_state)
 {
@@ -740,7 +752,7 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
        const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
        u32 sprctl;
 
-       sprctl = SP_ENABLE | SP_GAMMA_ENABLE;
+       sprctl = SP_ENABLE;
 
        switch (fb->format->format) {
        case DRM_FORMAT_YUYV:
@@ -807,17 +819,19 @@ vlv_update_plane(struct intel_plane *plane,
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum pipe pipe = plane->pipe;
        enum plane_id plane_id = plane->id;
-       u32 sprctl = plane_state->ctl;
        u32 sprsurf_offset = plane_state->color_plane[0].offset;
        u32 linear_offset;
        const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
        int crtc_x = plane_state->base.dst.x1;
        int crtc_y = plane_state->base.dst.y1;
-       uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
-       uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
-       uint32_t x = plane_state->color_plane[0].x;
-       uint32_t y = plane_state->color_plane[0].y;
+       u32 crtc_w = drm_rect_width(&plane_state->base.dst);
+       u32 crtc_h = drm_rect_height(&plane_state->base.dst);
+       u32 x = plane_state->color_plane[0].x;
+       u32 y = plane_state->color_plane[0].y;
        unsigned long irqflags;
+       u32 sprctl;
+
+       sprctl = plane_state->ctl | vlv_sprite_ctl_crtc(crtc_state);
 
        /* Sizes are 0 based */
        crtc_w--;
@@ -883,21 +897,36 @@ vlv_plane_get_hw_state(struct intel_plane *plane,
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum intel_display_power_domain power_domain;
        enum plane_id plane_id = plane->id;
+       intel_wakeref_t wakeref;
        bool ret;
 
        power_domain = POWER_DOMAIN_PIPE(plane->pipe);
-       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+       if (!wakeref)
                return false;
 
        ret = I915_READ(SPCNTR(plane->pipe, plane_id)) & SP_ENABLE;
 
        *pipe = plane->pipe;
 
-       intel_display_power_put(dev_priv, power_domain);
+       intel_display_power_put(dev_priv, power_domain, wakeref);
 
        return ret;
 }
 
+static u32 ivb_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+       u32 sprctl = 0;
+
+       sprctl |= SPRITE_GAMMA_ENABLE;
+
+       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+               sprctl |= SPRITE_PIPE_CSC_ENABLE;
+
+       return sprctl;
+}
+
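
These hunks split each sprite's control-register computation: the new *_ctl_crtc() helpers return only the bits derived from crtc_state (gamma enable, plus pipe CSC on hsw/bdw), the existing *_ctl() functions keep the plane_state-derived bits, and the update_plane() paths OR the two halves together at write time. A stand-alone sketch of the split, with made-up bit names:

#include <stdint.h>
#include <stdio.h>

#define CTL_ENABLE      (1u << 31)      /* illustrative bit layout */
#define CTL_GAMMA       (1u << 30)
#define CTL_FORMAT_RGBX (1u << 25)

/* bits derived from CRTC-wide state only */
static uint32_t sprite_ctl_crtc(int gamma_enabled)
{
        return gamma_enabled ? CTL_GAMMA : 0;
}

/* bits derived from per-plane state only */
static uint32_t sprite_ctl(void)
{
        return CTL_ENABLE | CTL_FORMAT_RGBX;
}

int main(void)
{
        /* update_plane() combines both halves at write time */
        uint32_t sprctl = sprite_ctl() | sprite_ctl_crtc(1);

        printf("sprctl = 0x%08x\n", sprctl);
        return 0;
}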
 static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
                          const struct intel_plane_state *plane_state)
 {
@@ -908,14 +937,11 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
        const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
        u32 sprctl;
 
-       sprctl = SPRITE_ENABLE | SPRITE_GAMMA_ENABLE;
+       sprctl = SPRITE_ENABLE;
 
        if (IS_IVYBRIDGE(dev_priv))
                sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
 
-       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-               sprctl |= SPRITE_PIPE_CSC_ENABLE;
-
        switch (fb->format->format) {
        case DRM_FORMAT_XBGR8888:
                sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
@@ -967,20 +993,22 @@ ivb_update_plane(struct intel_plane *plane,
 {
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum pipe pipe = plane->pipe;
-       u32 sprctl = plane_state->ctl, sprscale = 0;
        u32 sprsurf_offset = plane_state->color_plane[0].offset;
        u32 linear_offset;
        const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
        int crtc_x = plane_state->base.dst.x1;
        int crtc_y = plane_state->base.dst.y1;
-       uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
-       uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
-       uint32_t x = plane_state->color_plane[0].x;
-       uint32_t y = plane_state->color_plane[0].y;
-       uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
-       uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
+       u32 crtc_w = drm_rect_width(&plane_state->base.dst);
+       u32 crtc_h = drm_rect_height(&plane_state->base.dst);
+       u32 x = plane_state->color_plane[0].x;
+       u32 y = plane_state->color_plane[0].y;
+       u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
+       u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
+       u32 sprctl, sprscale = 0;
        unsigned long irqflags;
 
+       sprctl = plane_state->ctl | ivb_sprite_ctl_crtc(crtc_state);
+
        /* Sizes are 0 based */
        src_w--;
        src_h--;
@@ -1052,17 +1080,19 @@ ivb_plane_get_hw_state(struct intel_plane *plane,
 {
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum intel_display_power_domain power_domain;
+       intel_wakeref_t wakeref;
        bool ret;
 
        power_domain = POWER_DOMAIN_PIPE(plane->pipe);
-       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+       if (!wakeref)
                return false;
 
        ret =  I915_READ(SPRCTL(plane->pipe)) & SPRITE_ENABLE;
 
        *pipe = plane->pipe;
 
-       intel_display_power_put(dev_priv, power_domain);
+       intel_display_power_put(dev_priv, power_domain, wakeref);
 
        return ret;
 }
@@ -1075,6 +1105,11 @@ g4x_sprite_max_stride(struct intel_plane *plane,
        return 16384;
 }
 
+static u32 g4x_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+       return DVS_GAMMA_ENABLE;
+}
+
 static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
                          const struct intel_plane_state *plane_state)
 {
@@ -1085,9 +1120,9 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
        const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
        u32 dvscntr;
 
-       dvscntr = DVS_ENABLE | DVS_GAMMA_ENABLE;
+       dvscntr = DVS_ENABLE;
 
-       if (IS_GEN6(dev_priv))
+       if (IS_GEN(dev_priv, 6))
                dvscntr |= DVS_TRICKLE_FEED_DISABLE;
 
        switch (fb->format->format) {
@@ -1141,20 +1176,22 @@ g4x_update_plane(struct intel_plane *plane,
 {
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum pipe pipe = plane->pipe;
-       u32 dvscntr = plane_state->ctl, dvsscale = 0;
        u32 dvssurf_offset = plane_state->color_plane[0].offset;
        u32 linear_offset;
        const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
        int crtc_x = plane_state->base.dst.x1;
        int crtc_y = plane_state->base.dst.y1;
-       uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
-       uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
-       uint32_t x = plane_state->color_plane[0].x;
-       uint32_t y = plane_state->color_plane[0].y;
-       uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
-       uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
+       u32 crtc_w = drm_rect_width(&plane_state->base.dst);
+       u32 crtc_h = drm_rect_height(&plane_state->base.dst);
+       u32 x = plane_state->color_plane[0].x;
+       u32 y = plane_state->color_plane[0].y;
+       u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
+       u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
+       u32 dvscntr, dvsscale = 0;
        unsigned long irqflags;
 
+       dvscntr = plane_state->ctl | g4x_sprite_ctl_crtc(crtc_state);
+
        /* Sizes are 0 based */
        src_w--;
        src_h--;
@@ -1218,17 +1255,19 @@ g4x_plane_get_hw_state(struct intel_plane *plane,
 {
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum intel_display_power_domain power_domain;
+       intel_wakeref_t wakeref;
        bool ret;
 
        power_domain = POWER_DOMAIN_PIPE(plane->pipe);
-       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+       if (!wakeref)
                return false;
 
        ret = I915_READ(DVSCNTR(plane->pipe)) & DVS_ENABLE;
 
        *pipe = plane->pipe;
 
-       intel_display_power_put(dev_priv, power_domain);
+       intel_display_power_put(dev_priv, power_domain, wakeref);
 
        return ret;
 }
@@ -1699,7 +1738,7 @@ out:
        return ret;
 }
 
-static const uint32_t g4x_plane_formats[] = {
+static const u32 g4x_plane_formats[] = {
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_YUYV,
        DRM_FORMAT_YVYU,
@@ -1707,13 +1746,13 @@ static const uint32_t g4x_plane_formats[] = {
        DRM_FORMAT_VYUY,
 };
 
-static const uint64_t i9xx_plane_format_modifiers[] = {
+static const u64 i9xx_plane_format_modifiers[] = {
        I915_FORMAT_MOD_X_TILED,
        DRM_FORMAT_MOD_LINEAR,
        DRM_FORMAT_MOD_INVALID
 };
 
-static const uint32_t snb_plane_formats[] = {
+static const u32 snb_plane_formats[] = {
        DRM_FORMAT_XBGR8888,
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_YUYV,
@@ -1722,7 +1761,7 @@ static const uint32_t snb_plane_formats[] = {
        DRM_FORMAT_VYUY,
 };
 
-static const uint32_t vlv_plane_formats[] = {
+static const u32 vlv_plane_formats[] = {
        DRM_FORMAT_RGB565,
        DRM_FORMAT_ABGR8888,
        DRM_FORMAT_ARGB8888,
@@ -1736,7 +1775,7 @@ static const uint32_t vlv_plane_formats[] = {
        DRM_FORMAT_VYUY,
 };
 
-static const uint32_t skl_plane_formats[] = {
+static const u32 skl_plane_formats[] = {
        DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB8888,
@@ -1751,7 +1790,7 @@ static const uint32_t skl_plane_formats[] = {
        DRM_FORMAT_VYUY,
 };
 
-static const uint32_t skl_planar_formats[] = {
+static const u32 skl_planar_formats[] = {
        DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB8888,
@@ -1767,7 +1806,7 @@ static const uint32_t skl_planar_formats[] = {
        DRM_FORMAT_NV12,
 };
 
-static const uint64_t skl_plane_format_modifiers_noccs[] = {
+static const u64 skl_plane_format_modifiers_noccs[] = {
        I915_FORMAT_MOD_Yf_TILED,
        I915_FORMAT_MOD_Y_TILED,
        I915_FORMAT_MOD_X_TILED,
@@ -1775,7 +1814,7 @@ static const uint64_t skl_plane_format_modifiers_noccs[] = {
        DRM_FORMAT_MOD_INVALID
 };
 
-static const uint64_t skl_plane_format_modifiers_ccs[] = {
+static const u64 skl_plane_format_modifiers_ccs[] = {
        I915_FORMAT_MOD_Yf_TILED_CCS,
        I915_FORMAT_MOD_Y_TILED_CCS,
        I915_FORMAT_MOD_Yf_TILED,
@@ -1983,7 +2022,7 @@ static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
        if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
                return false;
 
-       if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
+       if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
                return false;
 
        if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
@@ -2163,7 +2202,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
                plane->check_plane = g4x_sprite_check;
 
                modifiers = i9xx_plane_format_modifiers;
-               if (IS_GEN6(dev_priv)) {
+               if (IS_GEN(dev_priv, 6)) {
                        formats = snb_plane_formats;
                        num_formats = ARRAY_SIZE(snb_plane_formats);
 
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 860f306a23bafbda312d63bafb2039ce67b91069..3924c4944e1f03518a3d119af905e8a2a678faba 100644
@@ -30,7 +30,6 @@
  * Integrated TV-out support for the 915GM and 945GM.
  */
 
-#include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
@@ -307,7 +306,7 @@ struct tv_mode {
 
        u32 clock;
        u16 refresh; /* in millihertz (for precision) */
-       u32 oversample;
+       u8 oversample;
        u8 hsync_end;
        u16 hblank_start, hblank_end, htotal;
        bool progressive : 1, trilevel_sync : 1, component_only : 1;
@@ -340,7 +339,6 @@ struct tv_mode {
        const struct video_levels *composite_levels, *svideo_levels;
        const struct color_conversion *composite_color, *svideo_color;
        const u32 *filter_table;
-       u16 max_srcw;
 };
 
 
@@ -379,8 +377,8 @@ static const struct tv_mode tv_modes[] = {
                .name           = "NTSC-M",
                .clock          = 108000,
                .refresh        = 59940,
-               .oversample     = TV_OVERSAMPLE_8X,
-               .component_only = 0,
+               .oversample     = 8,
+               .component_only = false,
                /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
 
                .hsync_end      = 64,               .hblank_end         = 124,
@@ -422,8 +420,8 @@ static const struct tv_mode tv_modes[] = {
                .name           = "NTSC-443",
                .clock          = 108000,
                .refresh        = 59940,
-               .oversample     = TV_OVERSAMPLE_8X,
-               .component_only = 0,
+               .oversample     = 8,
+               .component_only = false,
                /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 4.43MHz */
                .hsync_end      = 64,               .hblank_end         = 124,
                .hblank_start   = 836,              .htotal             = 857,
@@ -464,8 +462,8 @@ static const struct tv_mode tv_modes[] = {
                .name           = "NTSC-J",
                .clock          = 108000,
                .refresh        = 59940,
-               .oversample     = TV_OVERSAMPLE_8X,
-               .component_only = 0,
+               .oversample     = 8,
+               .component_only = false,
 
                /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
                .hsync_end      = 64,               .hblank_end         = 124,
@@ -507,8 +505,8 @@ static const struct tv_mode tv_modes[] = {
                .name           = "PAL-M",
                .clock          = 108000,
                .refresh        = 59940,
-               .oversample     = TV_OVERSAMPLE_8X,
-               .component_only = 0,
+               .oversample     = 8,
+               .component_only = false,
 
                /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
                .hsync_end      = 64,             .hblank_end           = 124,
@@ -551,8 +549,8 @@ static const struct tv_mode tv_modes[] = {
                .name       = "PAL-N",
                .clock          = 108000,
                .refresh        = 50000,
-               .oversample     = TV_OVERSAMPLE_8X,
-               .component_only = 0,
+               .oversample     = 8,
+               .component_only = false,
 
                .hsync_end      = 64,               .hblank_end         = 128,
                .hblank_start = 844,        .htotal             = 863,
@@ -596,8 +594,8 @@ static const struct tv_mode tv_modes[] = {
                .name       = "PAL",
                .clock          = 108000,
                .refresh        = 50000,
-               .oversample     = TV_OVERSAMPLE_8X,
-               .component_only = 0,
+               .oversample     = 8,
+               .component_only = false,
 
                .hsync_end      = 64,               .hblank_end         = 142,
                .hblank_start   = 844,      .htotal             = 863,
@@ -636,10 +634,10 @@ static const struct tv_mode tv_modes[] = {
        },
        {
                .name       = "480p",
-               .clock          = 107520,
+               .clock          = 108000,
                .refresh        = 59940,
-               .oversample     = TV_OVERSAMPLE_4X,
-               .component_only = 1,
+               .oversample     = 4,
+               .component_only = true,
 
                .hsync_end      = 64,               .hblank_end         = 122,
                .hblank_start   = 842,              .htotal             = 857,
@@ -660,10 +658,10 @@ static const struct tv_mode tv_modes[] = {
        },
        {
                .name       = "576p",
-               .clock          = 107520,
+               .clock          = 108000,
                .refresh        = 50000,
-               .oversample     = TV_OVERSAMPLE_4X,
-               .component_only = 1,
+               .oversample     = 4,
+               .component_only = true,
 
                .hsync_end      = 64,               .hblank_end         = 139,
                .hblank_start   = 859,              .htotal             = 863,
@@ -684,10 +682,10 @@ static const struct tv_mode tv_modes[] = {
        },
        {
                .name       = "720p@60Hz",
-               .clock          = 148800,
+               .clock          = 148500,
                .refresh        = 60000,
-               .oversample     = TV_OVERSAMPLE_2X,
-               .component_only = 1,
+               .oversample     = 2,
+               .component_only = true,
 
                .hsync_end      = 80,               .hblank_end         = 300,
                .hblank_start   = 1580,             .htotal             = 1649,
@@ -708,10 +706,10 @@ static const struct tv_mode tv_modes[] = {
        },
        {
                .name       = "720p@50Hz",
-               .clock          = 148800,
+               .clock          = 148500,
                .refresh        = 50000,
-               .oversample     = TV_OVERSAMPLE_2X,
-               .component_only = 1,
+               .oversample     = 2,
+               .component_only = true,
 
                .hsync_end      = 80,               .hblank_end         = 300,
                .hblank_start   = 1580,             .htotal             = 1979,
@@ -729,14 +727,13 @@ static const struct tv_mode tv_modes[] = {
                .burst_ena      = false,
 
                .filter_table = filter_table,
-               .max_srcw = 800
        },
        {
                .name       = "1080i@50Hz",
-               .clock          = 148800,
+               .clock          = 148500,
                .refresh        = 50000,
-               .oversample     = TV_OVERSAMPLE_2X,
-               .component_only = 1,
+               .oversample     = 2,
+               .component_only = true,
 
                .hsync_end      = 88,               .hblank_end         = 235,
                .hblank_start   = 2155,             .htotal             = 2639,
@@ -759,10 +756,10 @@ static const struct tv_mode tv_modes[] = {
        },
        {
                .name       = "1080i@60Hz",
-               .clock          = 148800,
+               .clock          = 148500,
                .refresh        = 60000,
-               .oversample     = TV_OVERSAMPLE_2X,
-               .component_only = 1,
+               .oversample     = 2,
+               .component_only = true,
 
                .hsync_end      = 88,               .hblank_end         = 235,
                .hblank_start   = 2155,             .htotal             = 2199,
@@ -783,8 +780,115 @@ static const struct tv_mode tv_modes[] = {
 
                .filter_table = filter_table,
        },
+
+       {
+               .name       = "1080p@30Hz",
+               .clock          = 148500,
+               .refresh        = 30000,
+               .oversample     = 2,
+               .component_only = true,
+
+               .hsync_end      = 88,               .hblank_end         = 235,
+               .hblank_start   = 2155,             .htotal             = 2199,
+
+               .progressive    = true,             .trilevel_sync = true,
+
+               .vsync_start_f1 = 8,               .vsync_start_f2     = 8,
+               .vsync_len      = 10,
+
+               .veq_ena        = false,        .veq_start_f1   = 0,
+               .veq_start_f2   = 0,                .veq_len            = 0,
+
+               .vi_end_f1      = 44,               .vi_end_f2          = 44,
+               .nbr_end        = 1079,
+
+               .burst_ena      = false,
+
+               .filter_table = filter_table,
+       },
+
+       {
+               .name       = "1080p@50Hz",
+               .clock          = 148500,
+               .refresh        = 50000,
+               .oversample     = 1,
+               .component_only = true,
+
+               .hsync_end      = 88,               .hblank_end         = 235,
+               .hblank_start   = 2155,             .htotal             = 2639,
+
+               .progressive    = true,             .trilevel_sync = true,
+
+               .vsync_start_f1 = 8,               .vsync_start_f2     = 8,
+               .vsync_len      = 10,
+
+               .veq_ena        = false,        .veq_start_f1   = 0,
+               .veq_start_f2   = 0,                .veq_len            = 0,
+
+               .vi_end_f1      = 44,               .vi_end_f2          = 44,
+               .nbr_end        = 1079,
+
+               .burst_ena      = false,
+
+               .filter_table = filter_table,
+       },
+
+       {
+               .name       = "1080p@60Hz",
+               .clock          = 148500,
+               .refresh        = 60000,
+               .oversample     = 1,
+               .component_only = true,
+
+               .hsync_end      = 88,               .hblank_end         = 235,
+               .hblank_start   = 2155,             .htotal             = 2199,
+
+               .progressive    = true,             .trilevel_sync = true,
+
+               .vsync_start_f1 = 8,               .vsync_start_f2     = 8,
+               .vsync_len      = 10,
+
+               .veq_ena        = false,        .veq_start_f1   = 0,
+               .veq_start_f2   = 0,                .veq_len            = 0,
+
+               .vi_end_f1      = 44,               .vi_end_f2          = 44,
+               .nbr_end        = 1079,
+
+               .burst_ena      = false,
+
+               .filter_table = filter_table,
+       },
 };
 
+struct intel_tv_connector_state {
+       struct drm_connector_state base;
+
+       /*
+        * May need to override the user margins for
+        * gen3 >1024 wide source vertical centering.
+        */
+       struct {
+               u16 top, bottom;
+       } margins;
+
+       bool bypass_vfilter;
+};
+
+#define to_intel_tv_connector_state(x) container_of(x, struct intel_tv_connector_state, base)
+
+static struct drm_connector_state *
+intel_tv_connector_duplicate_state(struct drm_connector *connector)
+{
+       struct intel_tv_connector_state *state;
+
+       state = kmemdup(connector->state, sizeof(*state), GFP_KERNEL);
+       if (!state)
+               return NULL;
+
+       __drm_atomic_helper_connector_duplicate_state(connector, &state->base);
+       return &state->base;
+}
+
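
intel_tv_connector_state embeds drm_connector_state as its first member, the usual DRM pattern for subclassing atomic state: the duplicate hook copies the whole derived struct with kmemdup(), lets the base helper fix up the common part, and container_of() recovers the derived pointer elsewhere. A stand-alone sketch of the embedding with simplified types (malloc/memcpy standing in for kmemdup):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct base_state { int refcount; };

struct tv_state {
        struct base_state base;         /* must stay the first member */
        int top_margin, bottom_margin;
};

static struct base_state *duplicate_state(const struct base_state *old)
{
        /* copy the *derived* struct, then re-init the base part */
        struct tv_state *state = malloc(sizeof(*state));

        if (!state)
                return NULL;
        memcpy(state, container_of(old, struct tv_state, base),
               sizeof(*state));
        state->base.refcount = 1;
        return &state->base;
}

int main(void)
{
        struct tv_state old = { .base = { 1 }, .top_margin = 20 };
        struct base_state *copy = duplicate_state(&old.base);

        printf("top=%d\n", container_of(copy, struct tv_state, base)->top_margin);
        free(copy);
        return 0;
}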
 static struct intel_tv *enc_to_tv(struct intel_encoder *encoder)
 {
        return container_of(encoder, struct intel_tv, base);
@@ -860,45 +964,370 @@ intel_tv_mode_valid(struct drm_connector *connector,
        return MODE_CLOCK_RANGE;
 }
 
+static int
+intel_tv_mode_vdisplay(const struct tv_mode *tv_mode)
+{
+       if (tv_mode->progressive)
+               return tv_mode->nbr_end + 1;
+       else
+               return 2 * (tv_mode->nbr_end + 1);
+}
+
+static void
+intel_tv_mode_to_mode(struct drm_display_mode *mode,
+                     const struct tv_mode *tv_mode)
+{
+       mode->clock = tv_mode->clock /
+               (tv_mode->oversample >> !tv_mode->progressive);
+
+       /*
+        * tv_mode horizontal timings:
+        *
+        * hsync_end
+        *    | hblank_end
+        *    |    | hblank_start
+        *    |    |       | htotal
+        *    |     _______    |
+        *     ____/       \___
+        * \__/                \
+        */
+       mode->hdisplay =
+               tv_mode->hblank_start - tv_mode->hblank_end;
+       mode->hsync_start = mode->hdisplay +
+               tv_mode->htotal - tv_mode->hblank_start;
+       mode->hsync_end = mode->hsync_start +
+               tv_mode->hsync_end;
+       mode->htotal = tv_mode->htotal + 1;
+
+       /*
+        * tv_mode vertical timings:
+        *
+        * vsync_start
+        *    | vsync_end
+        *    |  | vi_end nbr_end
+        *    |  |    |       |
+        *    |  |     _______
+        * \__    ____/       \
+        *    \__/
+        */
+       mode->vdisplay = intel_tv_mode_vdisplay(tv_mode);
+       if (tv_mode->progressive) {
+               mode->vsync_start = mode->vdisplay +
+                       tv_mode->vsync_start_f1 + 1;
+               mode->vsync_end = mode->vsync_start +
+                       tv_mode->vsync_len;
+               mode->vtotal = mode->vdisplay +
+                       tv_mode->vi_end_f1 + 1;
+       } else {
+               mode->vsync_start = mode->vdisplay +
+                       tv_mode->vsync_start_f1 + 1 +
+                       tv_mode->vsync_start_f2 + 1;
+               mode->vsync_end = mode->vsync_start +
+                       2 * tv_mode->vsync_len;
+               mode->vtotal = mode->vdisplay +
+                       tv_mode->vi_end_f1 + 1 +
+                       tv_mode->vi_end_f2 + 1;
+       }
+
+       /* TV has its own notion of sync and other mode flags, so clear them. */
+       mode->flags = 0;
+
+       mode->vrefresh = 0;
+       mode->vrefresh = drm_mode_vrefresh(mode);
+
+       snprintf(mode->name, sizeof(mode->name),
+                "%dx%d%c (%s)",
+                mode->hdisplay, mode->vdisplay,
+                tv_mode->progressive ? 'p' : 'i',
+                tv_mode->name);
+}
+
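
The clock computation at the top of intel_tv_mode_to_mode() divides the TV encoder clock by the oversampling factor, halving the divisor once more for interlaced modes via the (oversample >> !progressive) trick. Worked against the table above: NTSC-M gives 108000 / (8 >> 1) = 27000 kHz, and 1080p@60Hz gives 148500 / (1 >> 0) = 148500 kHz. A stand-alone check:

#include <stdio.h>

/* dot clock = encoder clock / (oversample >> !progressive) */
static int tv_mode_dot_clock(int clock, int oversample, int progressive)
{
        return clock / (oversample >> !progressive);
}

int main(void)
{
        /* NTSC-M from the table above: 108 MHz, 8x oversample, interlaced */
        printf("%d kHz\n", tv_mode_dot_clock(108000, 8, 0));    /* 27000 */
        /* 1080p@60Hz: 148.5 MHz, 1x oversample, progressive */
        printf("%d kHz\n", tv_mode_dot_clock(148500, 1, 1));    /* 148500 */
        return 0;
}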
+static void intel_tv_scale_mode_horiz(struct drm_display_mode *mode,
+                                     int hdisplay, int left_margin,
+                                     int right_margin)
+{
+       int hsync_start = mode->hsync_start - mode->hdisplay + right_margin;
+       int hsync_end = mode->hsync_end - mode->hdisplay + right_margin;
+       int new_htotal = mode->htotal * hdisplay /
+               (mode->hdisplay - left_margin - right_margin);
+
+       mode->clock = mode->clock * new_htotal / mode->htotal;
+
+       mode->hdisplay = hdisplay;
+       mode->hsync_start = hdisplay + hsync_start * new_htotal / mode->htotal;
+       mode->hsync_end = hdisplay + hsync_end * new_htotal / mode->htotal;
+       mode->htotal = new_htotal;
+}
+
+static void intel_tv_scale_mode_vert(struct drm_display_mode *mode,
+                                    int vdisplay, int top_margin,
+                                    int bottom_margin)
+{
+       int vsync_start = mode->vsync_start - mode->vdisplay + bottom_margin;
+       int vsync_end = mode->vsync_end - mode->vdisplay + bottom_margin;
+       int new_vtotal = mode->vtotal * vdisplay /
+               (mode->vdisplay - top_margin - bottom_margin);
+
+       mode->clock = mode->clock * new_vtotal / mode->vtotal;
+
+       mode->vdisplay = vdisplay;
+       mode->vsync_start = vdisplay + vsync_start * new_vtotal / mode->vtotal;
+       mode->vsync_end = vdisplay + vsync_end * new_vtotal / mode->vtotal;
+       mode->vtotal = new_vtotal;
+}
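
Both scale helpers rescale the timings proportionally: the new total is old_total * new_active / (old_active - margins), and the clock is scaled by the same total ratio so the refresh rate is preserved. For example, vtotal = 1125 at vdisplay = 1080 with 28 + 28 lines of margin becomes 1125 * 1080 / 1024 = 1186 in integer math. A stand-alone sketch of the vertical case, mirroring the helper above:

#include <stdio.h>

struct mode { int clock, vdisplay, vtotal; };

/* proportional rescale, as in intel_tv_scale_mode_vert() */
static void scale_mode_vert(struct mode *m, int vdisplay,
                            int top_margin, int bottom_margin)
{
        int new_vtotal = m->vtotal * vdisplay /
                (m->vdisplay - top_margin - bottom_margin);

        m->clock = m->clock * new_vtotal / m->vtotal;   /* keep refresh */
        m->vdisplay = vdisplay;
        m->vtotal = new_vtotal;
}

int main(void)
{
        struct mode m = { .clock = 148500, .vdisplay = 1080, .vtotal = 1125 };

        scale_mode_vert(&m, 1080, 28, 28);
        printf("vtotal=%d clock=%d\n", m.vtotal, m.clock);      /* 1186 156552 */
        return 0;
}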
 
 static void
 intel_tv_get_config(struct intel_encoder *encoder,
                    struct intel_crtc_state *pipe_config)
 {
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct drm_display_mode *adjusted_mode =
+               &pipe_config->base.adjusted_mode;
+       struct drm_display_mode mode = {};
+       u32 tv_ctl, hctl1, hctl3, vctl1, vctl2, tmp;
+       struct tv_mode tv_mode = {};
+       int hdisplay = adjusted_mode->crtc_hdisplay;
+       int vdisplay = adjusted_mode->crtc_vdisplay;
+       int xsize, ysize, xpos, ypos;
+
        pipe_config->output_types |= BIT(INTEL_OUTPUT_TVOUT);
 
-       pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
+       tv_ctl = I915_READ(TV_CTL);
+       hctl1 = I915_READ(TV_H_CTL_1);
+       hctl3 = I915_READ(TV_H_CTL_3);
+       vctl1 = I915_READ(TV_V_CTL_1);
+       vctl2 = I915_READ(TV_V_CTL_2);
+
+       tv_mode.htotal = (hctl1 & TV_HTOTAL_MASK) >> TV_HTOTAL_SHIFT;
+       tv_mode.hsync_end = (hctl1 & TV_HSYNC_END_MASK) >> TV_HSYNC_END_SHIFT;
+
+       tv_mode.hblank_start = (hctl3 & TV_HBLANK_START_MASK) >> TV_HBLANK_START_SHIFT;
+       tv_mode.hblank_end = (hctl3 & TV_HBLANK_END_MASK) >> TV_HBLANK_END_SHIFT;
+
+       tv_mode.nbr_end = (vctl1 & TV_NBR_END_MASK) >> TV_NBR_END_SHIFT;
+       tv_mode.vi_end_f1 = (vctl1 & TV_VI_END_F1_MASK) >> TV_VI_END_F1_SHIFT;
+       tv_mode.vi_end_f2 = (vctl1 & TV_VI_END_F2_MASK) >> TV_VI_END_F2_SHIFT;
+
+       tv_mode.vsync_len = (vctl2 & TV_VSYNC_LEN_MASK) >> TV_VSYNC_LEN_SHIFT;
+       tv_mode.vsync_start_f1 = (vctl2 & TV_VSYNC_START_F1_MASK) >> TV_VSYNC_START_F1_SHIFT;
+       tv_mode.vsync_start_f2 = (vctl2 & TV_VSYNC_START_F2_MASK) >> TV_VSYNC_START_F2_SHIFT;
+
+       tv_mode.clock = pipe_config->port_clock;
+
+       tv_mode.progressive = tv_ctl & TV_PROGRESSIVE;
+
+       switch (tv_ctl & TV_OVERSAMPLE_MASK) {
+       case TV_OVERSAMPLE_8X:
+               tv_mode.oversample = 8;
+               break;
+       case TV_OVERSAMPLE_4X:
+               tv_mode.oversample = 4;
+               break;
+       case TV_OVERSAMPLE_2X:
+               tv_mode.oversample = 2;
+               break;
+       default:
+               tv_mode.oversample = 1;
+               break;
+       }
+
+       tmp = I915_READ(TV_WIN_POS);
+       xpos = tmp >> 16;
+       ypos = tmp & 0xffff;
+
+       tmp = I915_READ(TV_WIN_SIZE);
+       xsize = tmp >> 16;
+       ysize = tmp & 0xffff;
+
+       intel_tv_mode_to_mode(&mode, &tv_mode);
+
+       DRM_DEBUG_KMS("TV mode:\n");
+       drm_mode_debug_printmodeline(&mode);
+
+       intel_tv_scale_mode_horiz(&mode, hdisplay,
+                                 xpos, mode.hdisplay - xsize - xpos);
+       intel_tv_scale_mode_vert(&mode, vdisplay,
+                                ypos, mode.vdisplay - ysize - ypos);
+
+       adjusted_mode->crtc_clock = mode.clock;
+       if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+               adjusted_mode->crtc_clock /= 2;
+
+       /* pixel counter doesn't work on i965gm TV output */
+       if (IS_I965GM(dev_priv))
+               adjusted_mode->private_flags |=
+                       I915_MODE_FLAG_USE_SCANLINE_COUNTER;
 }
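
intel_tv_get_config() above reconstructs a tv_mode entirely from register contents: each timing field is recovered with a mask/shift pair, and the TV_OVERSAMPLE_* control bits are mapped back to the plain integer factor now stored in the mode table (intel_tv_pre_enable() below adds the inverse switch). A stand-alone sketch of such a field round-trip; the register layout here is made up:

#include <stdint.h>
#include <stdio.h>

#define HTOTAL_MASK     0x00001fff      /* illustrative layout */
#define HTOTAL_SHIFT    0
#define OVERSAMPLE_MASK 0x18000000
#define OVERSAMPLE_8X   0x08000000
#define OVERSAMPLE_4X   0x10000000
#define OVERSAMPLE_2X   0x18000000

static int decode_oversample(uint32_t ctl)
{
        switch (ctl & OVERSAMPLE_MASK) {
        case OVERSAMPLE_8X: return 8;
        case OVERSAMPLE_4X: return 4;
        case OVERSAMPLE_2X: return 2;
        default:            return 1;
        }
}

int main(void)
{
        uint32_t hctl = 857, tv_ctl = OVERSAMPLE_8X;

        printf("htotal=%u oversample=%d\n",
               (hctl & HTOTAL_MASK) >> HTOTAL_SHIFT,
               decode_oversample(tv_ctl));
        return 0;
}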
 
-static bool
+static bool intel_tv_source_too_wide(struct drm_i915_private *dev_priv,
+                                    int hdisplay)
+{
+       return IS_GEN(dev_priv, 3) && hdisplay > 1024;
+}
+
+static bool intel_tv_vert_scaling(const struct drm_display_mode *tv_mode,
+                                 const struct drm_connector_state *conn_state,
+                                 int vdisplay)
+{
+       return tv_mode->crtc_vdisplay -
+               conn_state->tv.margins.top -
+               conn_state->tv.margins.bottom !=
+               vdisplay;
+}
+
+static int
 intel_tv_compute_config(struct intel_encoder *encoder,
                        struct intel_crtc_state *pipe_config,
                        struct drm_connector_state *conn_state)
 {
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_tv_connector_state *tv_conn_state =
+               to_intel_tv_connector_state(conn_state);
        const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state);
        struct drm_display_mode *adjusted_mode =
                &pipe_config->base.adjusted_mode;
+       int hdisplay = adjusted_mode->crtc_hdisplay;
+       int vdisplay = adjusted_mode->crtc_vdisplay;
 
        if (!tv_mode)
-               return false;
+               return -EINVAL;
 
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
-               return false;
+               return -EINVAL;
 
        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
-       adjusted_mode->crtc_clock = tv_mode->clock;
+
        DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
        pipe_config->pipe_bpp = 8*3;
 
-       /* TV has it's own notion of sync and other mode flags, so clear them. */
-       adjusted_mode->flags = 0;
+       pipe_config->port_clock = tv_mode->clock;
+
+       intel_tv_mode_to_mode(adjusted_mode, tv_mode);
+       drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+       if (intel_tv_source_too_wide(dev_priv, hdisplay) ||
+           !intel_tv_vert_scaling(adjusted_mode, conn_state, vdisplay)) {
+               int extra, top, bottom;
+
+               extra = adjusted_mode->crtc_vdisplay - vdisplay;
+
+               if (extra < 0) {
+                       DRM_DEBUG_KMS("No vertical scaling for >1024 pixel wide modes\n");
+                       return -EINVAL;
+               }
+
+               /* Need to turn off the vertical filter and center the image */
+
+               /* Attempt to maintain the relative sizes of the margins */
+               top = conn_state->tv.margins.top;
+               bottom = conn_state->tv.margins.bottom;
+
+               if (top + bottom)
+                       top = extra * top / (top + bottom);
+               else
+                       top = extra / 2;
+               bottom = extra - top;
+
+               tv_conn_state->margins.top = top;
+               tv_conn_state->margins.bottom = bottom;
+
+               tv_conn_state->bypass_vfilter = true;
+
+               if (!tv_mode->progressive) {
+                       adjusted_mode->clock /= 2;
+                       adjusted_mode->crtc_clock /= 2;
+                       adjusted_mode->flags |= DRM_MODE_FLAG_INTERLACE;
+               }
+       } else {
+               tv_conn_state->margins.top = conn_state->tv.margins.top;
+               tv_conn_state->margins.bottom = conn_state->tv.margins.bottom;
+
+               tv_conn_state->bypass_vfilter = false;
+       }
+
+       DRM_DEBUG_KMS("TV mode:\n");
+       drm_mode_debug_printmodeline(adjusted_mode);
 
        /*
-        * FIXME: We don't check whether the input mode is actually what we want
-        * or whether userspace is doing something stupid.
+        * The pipe scanline counter behaviour looks as follows when
+        * using the TV encoder:
+        *
+        * time ->
+        *
+        * dsl=vtotal-1       |             |
+        *                   ||            ||
+        *               ___| |        ___| |
+        *              /     |       /     |
+        *             /      |      /      |
+        * dsl=0   ___/       |_____/       |
+        *        | | |  |  | |
+        *         ^ ^ ^   ^ ^
+        *         | | |   | pipe vblank/first part of tv vblank
+        *         | | |   bottom margin
+        *         | | active
+        *         | top margin
+        *         remainder of tv vblank
+        *
+        * When the TV encoder is used the pipe wants to run faster
+        * than expected rate. During the active portion the TV
+        * encoder stalls the pipe every few lines to keep it in
+        * check. When the TV encoder reaches the bottom margin the
+        * pipe simply stops. Once we reach the TV vblank the pipe is
+        * no longer stalled and it runs at the max rate (apparently
+        * oversample clock on gen3, cdclk on gen4). Once the pipe
+        * reaches the pipe vtotal the pipe stops for the remainder
+        * of the TV vblank/top margin. The pipe starts up again when
+        * the TV encoder exits the top margin.
+        *
+        * To avoid huge hassles for vblank timestamping we scale
+        * the pipe timings as if the pipe always runs at the average
+        * rate it maintains during the active period. This also
+        * gives us a reasonable guesstimate as to the pixel rate.
+        * Due to the variation in the actual pipe speed the scanline
+        * counter will give us slightly erroneous results during the
+        * TV vblank/margins. But since vtotal was selected such that
+        * it matches the average rate of the pipe during the active
+        * portion the error shouldn't cause any serious grief to
+        * vblank timestamps.
+        *
+        * For posterity here is the empirically derived formula
+        * that gives us the maximum length of the pipe vblank
+        * we can use without causing display corruption. Following
+        * this would allow us to have a ticking scanline counter
+        * everywhere except during the bottom margin (there the
+        * pipe always stops). I.e. this would eliminate the second
+        * flat portion of the above graph. However, this would also
+        * complicate vblank timestamping as the pipe vtotal would
+        * no longer match the average rate the pipe runs at during
+        * the active portion. Hence following this formula seems
+        * more trouble than it's worth.
+        *
+        * if (IS_GEN(dev_priv, 4)) {
+        *      num = cdclk * (tv_mode->oversample >> !tv_mode->progressive);
+        *      den = tv_mode->clock;
+        * } else {
+        *      num = tv_mode->oversample >> !tv_mode->progressive;
+        *      den = 1;
+        * }
+        * max_pipe_vblank_len ~=
+        *      (num * tv_htotal * (tv_vblank_len + top_margin)) /
+        *      (den * pipe_htotal);
         */
+       intel_tv_scale_mode_horiz(adjusted_mode, hdisplay,
+                                 conn_state->tv.margins.left,
+                                 conn_state->tv.margins.right);
+       intel_tv_scale_mode_vert(adjusted_mode, vdisplay,
+                                tv_conn_state->margins.top,
+                                tv_conn_state->margins.bottom);
+       drm_mode_set_crtcinfo(adjusted_mode, 0);
+       adjusted_mode->name[0] = '\0';
+
+       /* pixel counter doesn't work on i965gm TV output */
+       if (IS_I965GM(dev_priv))
+               adjusted_mode->private_flags |=
+                       I915_MODE_FLAG_USE_SCANLINE_COUNTER;
 
-       return true;
+       return 0;
 }
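
When intel_tv_compute_config() has to center a too-wide gen3 source vertically, the extra lines are split in proportion to the user's margins: top = extra * top / (top + bottom), bottom = extra - top, falling back to an even extra / 2 split when no margins are set. For example, 56 extra lines with 20/36 user margins stay 20/36. A stand-alone sketch:

#include <stdio.h>

/* split `extra` lines proportionally to the existing margins */
static void split_margins(int extra, int *top, int *bottom)
{
        if (*top + *bottom)
                *top = extra * *top / (*top + *bottom);
        else
                *top = extra / 2;
        *bottom = extra - *top;
}

int main(void)
{
        int top = 20, bottom = 36;

        split_margins(56, &top, &bottom);
        printf("top=%d bottom=%d\n", top, bottom);      /* 20 36 */

        top = bottom = 0;
        split_margins(56, &top, &bottom);
        printf("top=%d bottom=%d\n", top, bottom);      /* 28 28 */
        return 0;
}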
 
 static void
@@ -987,14 +1416,16 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
        struct intel_tv *intel_tv = enc_to_tv(encoder);
+       const struct intel_tv_connector_state *tv_conn_state =
+               to_intel_tv_connector_state(conn_state);
        const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state);
-       u32 tv_ctl;
+       u32 tv_ctl, tv_filter_ctl;
        u32 scctl1, scctl2, scctl3;
        int i, j;
        const struct video_levels *video_levels;
        const struct color_conversion *color_conversion;
        bool burst_ena;
-       int xpos = 0x0, ypos = 0x0;
+       int xpos, ypos;
        unsigned int xsize, ysize;
 
        if (!tv_mode)
@@ -1030,7 +1461,21 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
        }
 
        tv_ctl |= TV_ENC_PIPE_SEL(intel_crtc->pipe);
-       tv_ctl |= tv_mode->oversample;
+
+       switch (tv_mode->oversample) {
+       case 8:
+               tv_ctl |= TV_OVERSAMPLE_8X;
+               break;
+       case 4:
+               tv_ctl |= TV_OVERSAMPLE_4X;
+               break;
+       case 2:
+               tv_ctl |= TV_OVERSAMPLE_2X;
+               break;
+       default:
+               tv_ctl |= TV_OVERSAMPLE_NONE;
+               break;
+       }
 
        if (tv_mode->progressive)
                tv_ctl |= TV_PROGRESSIVE;
@@ -1082,19 +1527,20 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
        assert_pipe_disabled(dev_priv, intel_crtc->pipe);
 
        /* Filter ctl must be set before TV_WIN_SIZE */
-       I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
+       tv_filter_ctl = TV_AUTO_SCALE;
+       if (tv_conn_state->bypass_vfilter)
+               tv_filter_ctl |= TV_V_FILTER_BYPASS;
+       I915_WRITE(TV_FILTER_CTL_1, tv_filter_ctl);
+
        xsize = tv_mode->hblank_start - tv_mode->hblank_end;
-       if (tv_mode->progressive)
-               ysize = tv_mode->nbr_end + 1;
-       else
-               ysize = 2*tv_mode->nbr_end + 1;
+       ysize = intel_tv_mode_vdisplay(tv_mode);
 
-       xpos += conn_state->tv.margins.left;
-       ypos += conn_state->tv.margins.top;
+       xpos = conn_state->tv.margins.left;
+       ypos = tv_conn_state->margins.top;
        xsize -= (conn_state->tv.margins.left +
                  conn_state->tv.margins.right);
-       ysize -= (conn_state->tv.margins.top +
-                 conn_state->tv.margins.bottom);
+       ysize -= (tv_conn_state->margins.top +
+                 tv_conn_state->margins.bottom);
        I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos);
        I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize);
 
@@ -1111,23 +1557,6 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
        I915_WRITE(TV_CTL, tv_ctl);
 }
 
-static const struct drm_display_mode reported_modes[] = {
-       {
-               .name = "NTSC 480i",
-               .clock = 107520,
-               .hdisplay = 1280,
-               .hsync_start = 1368,
-               .hsync_end = 1496,
-               .htotal = 1712,
-
-               .vdisplay = 1024,
-               .vsync_start = 1027,
-               .vsync_end = 1034,
-               .vtotal = 1104,
-               .type = DRM_MODE_TYPE_DRIVER,
-       },
-};
-
 static int
 intel_tv_detect_type(struct intel_tv *intel_tv,
                      struct drm_connector *connector)
@@ -1234,16 +1663,18 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
        const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
        int i;
 
-       if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
-               tv_mode->component_only)
+       /* Component supports everything so we can keep the current mode */
+       if (intel_tv->type == DRM_MODE_CONNECTOR_Component)
                return;
 
+       /* If the current mode is fine don't change it */
+       if (!tv_mode->component_only)
+               return;
 
        for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
-               tv_mode = tv_modes + i;
+               tv_mode = &tv_modes[i];
 
-               if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
-                       tv_mode->component_only)
+               if (!tv_mode->component_only)
                        break;
        }
 
@@ -1255,7 +1686,6 @@ intel_tv_detect(struct drm_connector *connector,
                struct drm_modeset_acquire_ctx *ctx,
                bool force)
 {
-       struct drm_display_mode mode;
        struct intel_tv *intel_tv = intel_attached_tv(connector);
        enum drm_connector_status status;
        int type;
@@ -1264,13 +1694,11 @@ intel_tv_detect(struct drm_connector *connector,
                      connector->base.id, connector->name,
                      force);
 
-       mode = reported_modes[0];
-
        if (force) {
                struct intel_load_detect_pipe tmp;
                int ret;
 
-               ret = intel_get_load_detect_pipe(connector, &mode, &tmp, ctx);
+               ret = intel_get_load_detect_pipe(connector, NULL, &tmp, ctx);
                if (ret < 0)
                        return ret;
 
@@ -1294,84 +1722,85 @@ intel_tv_detect(struct drm_connector *connector,
 }
 
 static const struct input_res {
-       const char *name;
-       int w, h;
+       u16 w, h;
 } input_res_table[] = {
-       {"640x480", 640, 480},
-       {"800x600", 800, 600},
-       {"1024x768", 1024, 768},
-       {"1280x1024", 1280, 1024},
-       {"848x480", 848, 480},
-       {"1280x720", 1280, 720},
-       {"1920x1080", 1920, 1080},
+       { 640, 480 },
+       { 800, 600 },
+       { 1024, 768 },
+       { 1280, 1024 },
+       { 848, 480 },
+       { 1280, 720 },
+       { 1920, 1080 },
 };
 
-/*
- * Chose preferred mode  according to line number of TV format
- */
+/* Choose the preferred mode according to the line count of the TV format */
+static bool
+intel_tv_is_preferred_mode(const struct drm_display_mode *mode,
+                          const struct tv_mode *tv_mode)
+{
+       int vdisplay = intel_tv_mode_vdisplay(tv_mode);
+
+       /* prefer 480 line modes for all SD TV modes */
+       if (vdisplay <= 576)
+               vdisplay = 480;
+
+       return vdisplay == mode->vdisplay;
+}
+
 static void
-intel_tv_choose_preferred_modes(const struct tv_mode *tv_mode,
-                              struct drm_display_mode *mode_ptr)
+intel_tv_set_mode_type(struct drm_display_mode *mode,
+                      const struct tv_mode *tv_mode)
 {
-       if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
-               mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
-       else if (tv_mode->nbr_end > 480) {
-               if (tv_mode->progressive == true && tv_mode->nbr_end < 720) {
-                       if (mode_ptr->vdisplay == 720)
-                               mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
-               } else if (mode_ptr->vdisplay == 1080)
-                               mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
-       }
+       mode->type = DRM_MODE_TYPE_DRIVER;
+
+       if (intel_tv_is_preferred_mode(mode, tv_mode))
+               mode->type |= DRM_MODE_TYPE_PREFERRED;
 }
 
 static int
 intel_tv_get_modes(struct drm_connector *connector)
 {
-       struct drm_display_mode *mode_ptr;
+       struct drm_i915_private *dev_priv = to_i915(connector->dev);
        const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
-       int j, count = 0;
-       u64 tmp;
+       int i, count = 0;
 
-       for (j = 0; j < ARRAY_SIZE(input_res_table);
-            j++) {
-               const struct input_res *input = &input_res_table[j];
-               unsigned int hactive_s = input->w;
-               unsigned int vactive_s = input->h;
+       for (i = 0; i < ARRAY_SIZE(input_res_table); i++) {
+               const struct input_res *input = &input_res_table[i];
+               struct drm_display_mode *mode;
 
-               if (tv_mode->max_srcw && input->w > tv_mode->max_srcw)
+               if (input->w > 1024 &&
+                   !tv_mode->progressive &&
+                   !tv_mode->component_only)
                        continue;
 
-               if (input->w > 1024 && (!tv_mode->progressive
-                                       && !tv_mode->component_only))
+               /* no vertical scaling with wide sources on gen3 */
+               if (IS_GEN(dev_priv, 3) && input->w > 1024 &&
+                   input->h > intel_tv_mode_vdisplay(tv_mode))
                        continue;
 
-               mode_ptr = drm_mode_create(connector->dev);
-               if (!mode_ptr)
+               mode = drm_mode_create(connector->dev);
+               if (!mode)
                        continue;
-               strlcpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
-
-               mode_ptr->hdisplay = hactive_s;
-               mode_ptr->hsync_start = hactive_s + 1;
-               mode_ptr->hsync_end = hactive_s + 64;
-               if (mode_ptr->hsync_end <= mode_ptr->hsync_start)
-                       mode_ptr->hsync_end = mode_ptr->hsync_start + 1;
-               mode_ptr->htotal = hactive_s + 96;
-
-               mode_ptr->vdisplay = vactive_s;
-               mode_ptr->vsync_start = vactive_s + 1;
-               mode_ptr->vsync_end = vactive_s + 32;
-               if (mode_ptr->vsync_end <= mode_ptr->vsync_start)
-                       mode_ptr->vsync_end = mode_ptr->vsync_start  + 1;
-               mode_ptr->vtotal = vactive_s + 33;
-
-               tmp = mul_u32_u32(tv_mode->refresh, mode_ptr->vtotal);
-               tmp *= mode_ptr->htotal;
-               tmp = div_u64(tmp, 1000000);
-               mode_ptr->clock = (int) tmp;
-
-               mode_ptr->type = DRM_MODE_TYPE_DRIVER;
-               intel_tv_choose_preferred_modes(tv_mode, mode_ptr);
-               drm_mode_probed_add(connector, mode_ptr);
+
+               /*
+                * We take the TV mode and scale it to look
+                * like it had the expected h/vdisplay. This
+                * provides the most information to userspace
+                * about the actual timings of the mode. We
+                * do ignore the margins though.
+                */
+               intel_tv_mode_to_mode(mode, tv_mode);
+               if (count == 0) {
+                       DRM_DEBUG_KMS("TV mode:\n");
+                       drm_mode_debug_printmodeline(mode);
+               }
+               intel_tv_scale_mode_horiz(mode, input->w, 0, 0);
+               intel_tv_scale_mode_vert(mode, input->h, 0, 0);
+               intel_tv_set_mode_type(mode, tv_mode);
+
+               drm_mode_set_name(mode);
+
+               drm_mode_probed_add(connector, mode);
                count++;
        }
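The intel_tv_scale_mode_horiz()/intel_tv_scale_mode_vert() helpers called above are introduced elsewhere in this patch. A minimal sketch of the horizontal case, assuming it simply rescales every horizontal timing and the pixel clock by the ratio of the requested width to the current one (margins are ignored, matching the comment above, and rounding is plain integer truncation):

	/* Sketch only, not the patch's actual helper: integer-rescale the
	 * horizontal timings so the mode reports the requested width while
	 * keeping the underlying TV timing proportions. */
	static void scale_mode_horiz_sketch(struct drm_display_mode *mode,
					    int hdisplay)
	{
		int old = mode->hdisplay;

		mode->clock = mode->clock * hdisplay / old;
		mode->hsync_start = mode->hsync_start * hdisplay / old;
		mode->hsync_end = mode->hsync_end * hdisplay / old;
		mode->htotal = mode->htotal * hdisplay / old;
		mode->hdisplay = hdisplay;
	}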
 
@@ -1384,7 +1813,7 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
        .destroy = intel_connector_destroy,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_duplicate_state = intel_tv_connector_duplicate_state,
 };
 
 static int intel_tv_atomic_check(struct drm_connector *connector,
@@ -1531,11 +1960,15 @@ intel_tv_init(struct drm_i915_private *dev_priv)
        connector->doublescan_allowed = false;
 
        /* Create TV properties then attach current values */
-       for (i = 0; i < ARRAY_SIZE(tv_modes); i++)
+       for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
+               /* 1080p50/1080p60 not supported on gen3 */
+               if (IS_GEN(dev_priv, 3) &&
+                   tv_modes[i].oversample == 1)
+                       break;
+
                tv_format_names[i] = tv_modes[i].name;
-       drm_mode_create_tv_properties(dev,
-                                     ARRAY_SIZE(tv_modes),
-                                     tv_format_names);
+       }
+       drm_mode_create_tv_properties(dev, i, tv_format_names);
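One inference worth recording (not stated explicitly in the patch): the early break above only produces a correct count if the format table is ordered for it.

	/*
	 * Assumed invariant: the oversample == 1 entries (the 1080p50/
	 * 1080p60 formats rejected on gen3) sit at the end of tv_modes[],
	 * so breaking at the first one leaves 'i' equal to the number of
	 * formats actually exposed to userspace.
	 */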
 
        drm_object_attach_property(&connector->base, dev->mode_config.tv_mode_property,
                                   state->tv.mode);
index b34c318b238dad37a027aad52b5a920e2c73f3dc..e711eb3268bccb39f631e6018567fe09b9e053f0 100644 (file)
@@ -26,6 +26,7 @@
 #include "intel_guc_submission.h"
 #include "intel_guc.h"
 #include "i915_drv.h"
+#include "i915_reset.h"
 
 static void guc_free_load_err_log(struct intel_guc *guc);
 
@@ -71,7 +72,7 @@ static int __get_default_guc_log_level(struct drm_i915_private *i915)
 {
        int guc_log_level;
 
-       if (!HAS_GUC(i915) || !intel_uc_is_using_guc())
+       if (!HAS_GUC(i915) || !intel_uc_is_using_guc(i915))
                guc_log_level = GUC_LOG_LEVEL_DISABLED;
        else if (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
                 IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
@@ -112,11 +113,11 @@ static void sanitize_options_early(struct drm_i915_private *i915)
 
        DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n",
                         i915_modparams.enable_guc,
-                        yesno(intel_uc_is_using_guc_submission()),
-                        yesno(intel_uc_is_using_huc()));
+                        yesno(intel_uc_is_using_guc_submission(i915)),
+                        yesno(intel_uc_is_using_huc(i915)));
 
        /* Verify GuC firmware availability */
-       if (intel_uc_is_using_guc() && !intel_uc_fw_is_selected(guc_fw)) {
+       if (intel_uc_is_using_guc(i915) && !intel_uc_fw_is_selected(guc_fw)) {
                DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
                         "enable_guc", i915_modparams.enable_guc,
                         !HAS_GUC(i915) ? "no GuC hardware" :
@@ -124,7 +125,7 @@ static void sanitize_options_early(struct drm_i915_private *i915)
        }
 
        /* Verify HuC firmware availability */
-       if (intel_uc_is_using_huc() && !intel_uc_fw_is_selected(huc_fw)) {
+       if (intel_uc_is_using_huc(i915) && !intel_uc_fw_is_selected(huc_fw)) {
                DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
                         "enable_guc", i915_modparams.enable_guc,
                         !HAS_HUC(i915) ? "no HuC hardware" :
@@ -136,7 +137,7 @@ static void sanitize_options_early(struct drm_i915_private *i915)
                i915_modparams.guc_log_level =
                        __get_default_guc_log_level(i915);
 
-       if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc()) {
+       if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc(i915)) {
                DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
                         "guc_log_level", i915_modparams.guc_log_level,
                         !HAS_GUC(i915) ? "no GuC hardware" :
@@ -354,7 +355,7 @@ int intel_uc_init_hw(struct drm_i915_private *i915)
 
        /* WaEnableuKernelHeaderValidFix:skl */
        /* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
-       if (IS_GEN9(i915))
+       if (IS_GEN(i915, 9))
                attempts = 3;
        else
                attempts = 1;
index 25d73ada74ae455fed80d2699b69fcb4978adae6..870faf9011b979c4d450146d241e1c8a2f1dce56 100644 (file)
@@ -41,19 +41,19 @@ void intel_uc_fini(struct drm_i915_private *dev_priv);
 int intel_uc_suspend(struct drm_i915_private *dev_priv);
 int intel_uc_resume(struct drm_i915_private *dev_priv);
 
-static inline bool intel_uc_is_using_guc(void)
+static inline bool intel_uc_is_using_guc(struct drm_i915_private *i915)
 {
        GEM_BUG_ON(i915_modparams.enable_guc < 0);
        return i915_modparams.enable_guc > 0;
 }
 
-static inline bool intel_uc_is_using_guc_submission(void)
+static inline bool intel_uc_is_using_guc_submission(struct drm_i915_private *i915)
 {
        GEM_BUG_ON(i915_modparams.enable_guc < 0);
        return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION;
 }
 
-static inline bool intel_uc_is_using_huc(void)
+static inline bool intel_uc_is_using_huc(struct drm_i915_private *i915)
 {
        GEM_BUG_ON(i915_modparams.enable_guc < 0);
        return i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC;
index fd496416087c491bfaeffd94caa131707cd28f07..becf05ebae4d3597fe021e0a7b706ba3fc2cd10e 100644 (file)
@@ -46,12 +46,17 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
        size_t size;
        int err;
 
+       if (!uc_fw->path) {
+               dev_info(dev_priv->drm.dev,
+                        "%s: No firmware was defined for %s!\n",
+                        intel_uc_fw_type_repr(uc_fw->type),
+                        intel_platform_name(INTEL_INFO(dev_priv)->platform));
+               return;
+       }
+
        DRM_DEBUG_DRIVER("%s fw fetch %s\n",
                         intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
 
-       if (!uc_fw->path)
-               return;
-
        uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
        DRM_DEBUG_DRIVER("%s fw fetch %s\n",
                         intel_uc_fw_type_repr(uc_fw->type),
index 9289515108c3182a6b0aa363639a631eed97e7a0..75646a1e0051c336b386c60057abaa9807c9c57e 100644 (file)
@@ -528,7 +528,7 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                ret |= vlv_check_for_unclaimed_mmio(dev_priv);
 
-       if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
+       if (IS_GEN_RANGE(dev_priv, 6, 7))
                ret |= gen6_check_for_fifo_debug(dev_priv);
 
        return ret;
@@ -556,7 +556,7 @@ static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
                dev_priv->uncore.funcs.force_wake_get(dev_priv,
                                                      restore_forcewake);
 
-               if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
+               if (IS_GEN_RANGE(dev_priv, 6, 7))
                        dev_priv->uncore.fifo_count =
                                fifo_free_entries(dev_priv);
                spin_unlock_irq(&dev_priv->uncore.lock);
@@ -1398,7 +1398,7 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
        if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
                return;
 
-       if (IS_GEN6(dev_priv)) {
+       if (IS_GEN(dev_priv, 6)) {
                dev_priv->uncore.fw_reset = 0;
                dev_priv->uncore.fw_set = FORCEWAKE_KERNEL;
                dev_priv->uncore.fw_clear = 0;
@@ -1437,7 +1437,7 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
                                       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
                                       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
                }
-       } else if (IS_GEN10(dev_priv) || IS_GEN9(dev_priv)) {
+       } else if (IS_GEN_RANGE(dev_priv, 9, 10)) {
                dev_priv->uncore.funcs.force_wake_get =
                        fw_domains_get_with_fallback;
                dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
@@ -1503,7 +1503,7 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
                        fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                                       FORCEWAKE, FORCEWAKE_ACK);
                }
-       } else if (IS_GEN6(dev_priv)) {
+       } else if (IS_GEN(dev_priv, 6)) {
                dev_priv->uncore.funcs.force_wake_get =
                        fw_domains_get_with_thread_status;
                dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
@@ -1567,13 +1567,13 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
        dev_priv->uncore.pmic_bus_access_nb.notifier_call =
                i915_pmic_bus_access_notifier;
 
-       if (IS_GEN(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) {
+       if (IS_GEN_RANGE(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) {
                ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen2);
                ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen2);
-       } else if (IS_GEN5(dev_priv)) {
+       } else if (IS_GEN(dev_priv, 5)) {
                ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen5);
                ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen5);
-       } else if (IS_GEN(dev_priv, 6, 7)) {
+       } else if (IS_GEN_RANGE(dev_priv, 6, 7)) {
                ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen6);
 
                if (IS_VALLEYVIEW(dev_priv)) {
@@ -1582,7 +1582,7 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
                } else {
                        ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
                }
-       } else if (IS_GEN8(dev_priv)) {
+       } else if (IS_GEN(dev_priv, 8)) {
                if (IS_CHERRYVIEW(dev_priv)) {
                        ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
                        ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
@@ -1592,7 +1592,7 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
                        ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen8);
                        ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
                }
-       } else if (IS_GEN(dev_priv, 9, 10)) {
+       } else if (IS_GEN_RANGE(dev_priv, 9, 10)) {
                ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
                ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
                ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
@@ -1670,6 +1670,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_reg_read *reg = data;
        struct reg_whitelist const *entry;
+       intel_wakeref_t wakeref;
        unsigned int flags;
        int remain;
        int ret = 0;
@@ -1695,286 +1696,25 @@ int i915_reg_read_ioctl(struct drm_device *dev,
 
        flags = reg->offset & (entry->size - 1);
 
-       intel_runtime_pm_get(dev_priv);
-       if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
-               reg->val = I915_READ64_2x32(entry->offset_ldw,
-                                           entry->offset_udw);
-       else if (entry->size == 8 && flags == 0)
-               reg->val = I915_READ64(entry->offset_ldw);
-       else if (entry->size == 4 && flags == 0)
-               reg->val = I915_READ(entry->offset_ldw);
-       else if (entry->size == 2 && flags == 0)
-               reg->val = I915_READ16(entry->offset_ldw);
-       else if (entry->size == 1 && flags == 0)
-               reg->val = I915_READ8(entry->offset_ldw);
-       else
-               ret = -EINVAL;
-       intel_runtime_pm_put(dev_priv);
-
-       return ret;
-}
-
-static void gen3_stop_engine(struct intel_engine_cs *engine)
-{
-       struct drm_i915_private *dev_priv = engine->i915;
-       const u32 base = engine->mmio_base;
-
-       if (intel_engine_stop_cs(engine))
-               DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n", engine->name);
-
-       I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base)));
-       POSTING_READ_FW(RING_HEAD(base)); /* paranoia */
-
-       I915_WRITE_FW(RING_HEAD(base), 0);
-       I915_WRITE_FW(RING_TAIL(base), 0);
-       POSTING_READ_FW(RING_TAIL(base));
-
-       /* The ring must be empty before it is disabled */
-       I915_WRITE_FW(RING_CTL(base), 0);
-
-       /* Check acts as a post */
-       if (I915_READ_FW(RING_HEAD(base)) != 0)
-               DRM_DEBUG_DRIVER("%s: ring head not parked\n",
-                                engine->name);
-}
-
-static void i915_stop_engines(struct drm_i915_private *dev_priv,
-                             unsigned int engine_mask)
-{
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
-
-       if (INTEL_GEN(dev_priv) < 3)
-               return;
-
-       for_each_engine_masked(engine, dev_priv, engine_mask, id)
-               gen3_stop_engine(engine);
-}
-
-static bool i915_in_reset(struct pci_dev *pdev)
-{
-       u8 gdrst;
-
-       pci_read_config_byte(pdev, I915_GDRST, &gdrst);
-       return gdrst & GRDOM_RESET_STATUS;
-}
-
-static int i915_do_reset(struct drm_i915_private *dev_priv,
-                        unsigned int engine_mask,
-                        unsigned int retry)
-{
-       struct pci_dev *pdev = dev_priv->drm.pdev;
-       int err;
-
-       /* Assert reset for at least 20 usec, and wait for acknowledgement. */
-       pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
-       usleep_range(50, 200);
-       err = wait_for(i915_in_reset(pdev), 500);
-
-       /* Clear the reset request. */
-       pci_write_config_byte(pdev, I915_GDRST, 0);
-       usleep_range(50, 200);
-       if (!err)
-               err = wait_for(!i915_in_reset(pdev), 500);
-
-       return err;
-}
-
-static bool g4x_reset_complete(struct pci_dev *pdev)
-{
-       u8 gdrst;
-
-       pci_read_config_byte(pdev, I915_GDRST, &gdrst);
-       return (gdrst & GRDOM_RESET_ENABLE) == 0;
-}
-
-static int g33_do_reset(struct drm_i915_private *dev_priv,
-                       unsigned int engine_mask,
-                       unsigned int retry)
-{
-       struct pci_dev *pdev = dev_priv->drm.pdev;
-
-       pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
-       return wait_for(g4x_reset_complete(pdev), 500);
-}
-
-static int g4x_do_reset(struct drm_i915_private *dev_priv,
-                       unsigned int engine_mask,
-                       unsigned int retry)
-{
-       struct pci_dev *pdev = dev_priv->drm.pdev;
-       int ret;
-
-       /* WaVcpClkGateDisableForMediaReset:ctg,elk */
-       I915_WRITE(VDECCLK_GATE_D,
-                  I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
-       POSTING_READ(VDECCLK_GATE_D);
-
-       pci_write_config_byte(pdev, I915_GDRST,
-                             GRDOM_MEDIA | GRDOM_RESET_ENABLE);
-       ret =  wait_for(g4x_reset_complete(pdev), 500);
-       if (ret) {
-               DRM_DEBUG_DRIVER("Wait for media reset failed\n");
-               goto out;
-       }
-
-       pci_write_config_byte(pdev, I915_GDRST,
-                             GRDOM_RENDER | GRDOM_RESET_ENABLE);
-       ret =  wait_for(g4x_reset_complete(pdev), 500);
-       if (ret) {
-               DRM_DEBUG_DRIVER("Wait for render reset failed\n");
-               goto out;
-       }
-
-out:
-       pci_write_config_byte(pdev, I915_GDRST, 0);
-
-       I915_WRITE(VDECCLK_GATE_D,
-                  I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
-       POSTING_READ(VDECCLK_GATE_D);
-
-       return ret;
-}
-
-static int ironlake_do_reset(struct drm_i915_private *dev_priv,
-                            unsigned int engine_mask,
-                            unsigned int retry)
-{
-       int ret;
-
-       I915_WRITE(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
-       ret = intel_wait_for_register(dev_priv,
-                                     ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
-                                     500);
-       if (ret) {
-               DRM_DEBUG_DRIVER("Wait for render reset failed\n");
-               goto out;
-       }
-
-       I915_WRITE(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
-       ret = intel_wait_for_register(dev_priv,
-                                     ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
-                                     500);
-       if (ret) {
-               DRM_DEBUG_DRIVER("Wait for media reset failed\n");
-               goto out;
+       with_intel_runtime_pm(dev_priv, wakeref) {
+               if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
+                       reg->val = I915_READ64_2x32(entry->offset_ldw,
+                                                   entry->offset_udw);
+               else if (entry->size == 8 && flags == 0)
+                       reg->val = I915_READ64(entry->offset_ldw);
+               else if (entry->size == 4 && flags == 0)
+                       reg->val = I915_READ(entry->offset_ldw);
+               else if (entry->size == 2 && flags == 0)
+                       reg->val = I915_READ16(entry->offset_ldw);
+               else if (entry->size == 1 && flags == 0)
+                       reg->val = I915_READ8(entry->offset_ldw);
+               else
+                       ret = -EINVAL;
        }
 
-out:
-       I915_WRITE(ILK_GDSR, 0);
-       POSTING_READ(ILK_GDSR);
        return ret;
 }
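The with_intel_runtime_pm() form used above scopes the get/put pair to a block. A plausible construction as a single-iteration for loop, shown here as an assumption rather than a quote from this series:

/* Sketch: run the body once with the wakeref held, then drop it. A break
 * inside the body would skip the put, so blocks are expected to exit
 * normally. */
#define with_intel_runtime_pm(i915, wf) \
	for ((wf) = intel_runtime_pm_get(i915); (wf); \
	     intel_runtime_pm_put((i915), (wf)), (wf) = 0)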
 
-/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
-static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
-                               u32 hw_domain_mask)
-{
-       int err;
-
-       /* GEN6_GDRST is not in the gt power well, no need to check
-        * for fifo space for the write or forcewake the chip for
-        * the read
-        */
-       __raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);
-
-       /* Wait for the device to ack the reset requests */
-       err = __intel_wait_for_register_fw(dev_priv,
-                                          GEN6_GDRST, hw_domain_mask, 0,
-                                          500, 0,
-                                          NULL);
-       if (err)
-               DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
-                                hw_domain_mask);
-
-       return err;
-}
-
-/**
- * gen6_reset_engines - reset individual engines
- * @dev_priv: i915 device
- * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
- * @retry: the count of previous attempts to reset.
- *
- * This function will reset the individual engines that are set in engine_mask.
- * If you provide ALL_ENGINES as the mask, a full global domain reset will
- * be issued.
- *
- * Note: It is the responsibility of the caller to handle the difference
- * between asking for a full domain reset versus a reset of all available
- * individual engines.
- *
- * Returns 0 on success, nonzero on error.
- */
-static int gen6_reset_engines(struct drm_i915_private *dev_priv,
-                             unsigned int engine_mask,
-                             unsigned int retry)
-{
-       struct intel_engine_cs *engine;
-       const u32 hw_engine_mask[I915_NUM_ENGINES] = {
-               [RCS] = GEN6_GRDOM_RENDER,
-               [BCS] = GEN6_GRDOM_BLT,
-               [VCS] = GEN6_GRDOM_MEDIA,
-               [VCS2] = GEN8_GRDOM_MEDIA2,
-               [VECS] = GEN6_GRDOM_VECS,
-       };
-       u32 hw_mask;
-
-       if (engine_mask == ALL_ENGINES) {
-               hw_mask = GEN6_GRDOM_FULL;
-       } else {
-               unsigned int tmp;
-
-               hw_mask = 0;
-               for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
-                       hw_mask |= hw_engine_mask[engine->id];
-       }
-
-       return gen6_hw_domain_reset(dev_priv, hw_mask);
-}
-
-/**
- * gen11_reset_engines - reset individual engines
- * @dev_priv: i915 device
- * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
- *
- * This function will reset the individual engines that are set in engine_mask.
- * If you provide ALL_ENGINES as the mask, a full global domain reset will
- * be issued.
- *
- * Note: It is the responsibility of the caller to handle the difference
- * between asking for a full domain reset versus a reset of all available
- * individual engines.
- *
- * Returns 0 on success, nonzero on error.
- */
-static int gen11_reset_engines(struct drm_i915_private *dev_priv,
-                              unsigned int engine_mask)
-{
-       struct intel_engine_cs *engine;
-       const u32 hw_engine_mask[I915_NUM_ENGINES] = {
-               [RCS] = GEN11_GRDOM_RENDER,
-               [BCS] = GEN11_GRDOM_BLT,
-               [VCS] = GEN11_GRDOM_MEDIA,
-               [VCS2] = GEN11_GRDOM_MEDIA2,
-               [VCS3] = GEN11_GRDOM_MEDIA3,
-               [VCS4] = GEN11_GRDOM_MEDIA4,
-               [VECS] = GEN11_GRDOM_VECS,
-               [VECS2] = GEN11_GRDOM_VECS2,
-       };
-       u32 hw_mask;
-
-       BUILD_BUG_ON(VECS2 + 1 != I915_NUM_ENGINES);
-
-       if (engine_mask == ALL_ENGINES) {
-               hw_mask = GEN11_GRDOM_FULL;
-       } else {
-               unsigned int tmp;
-
-               hw_mask = 0;
-               for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
-                       hw_mask |= hw_engine_mask[engine->id];
-       }
-
-       return gen6_hw_domain_reset(dev_priv, hw_mask);
-}
-
 /**
  * __intel_wait_for_register_fw - wait until register matches expected state
  * @dev_priv: the i915 device
@@ -2079,202 +1819,15 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv,
                                 (reg_value & mask) == value,
                                 slow_timeout_ms * 1000, 10, 1000);
 
+       /* just trace the final value */
+       trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);
+
        if (out_value)
                *out_value = reg_value;
 
        return ret;
 }
 
-static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
-{
-       struct drm_i915_private *dev_priv = engine->i915;
-       int ret;
-
-       I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
-                     _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
-
-       ret = __intel_wait_for_register_fw(dev_priv,
-                                          RING_RESET_CTL(engine->mmio_base),
-                                          RESET_CTL_READY_TO_RESET,
-                                          RESET_CTL_READY_TO_RESET,
-                                          700, 0,
-                                          NULL);
-       if (ret)
-               DRM_ERROR("%s: reset request timeout\n", engine->name);
-
-       return ret;
-}
-
-static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
-{
-       struct drm_i915_private *dev_priv = engine->i915;
-
-       I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
-                     _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
-}
-
-static int reset_engines(struct drm_i915_private *i915,
-                        unsigned int engine_mask,
-                        unsigned int retry)
-{
-       if (INTEL_GEN(i915) >= 11)
-               return gen11_reset_engines(i915, engine_mask);
-       else
-               return gen6_reset_engines(i915, engine_mask, retry);
-}
-
-static int gen8_reset_engines(struct drm_i915_private *dev_priv,
-                             unsigned int engine_mask,
-                             unsigned int retry)
-{
-       struct intel_engine_cs *engine;
-       const bool reset_non_ready = retry >= 1;
-       unsigned int tmp;
-       int ret;
-
-       for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
-               ret = gen8_engine_reset_prepare(engine);
-               if (ret && !reset_non_ready)
-                       goto skip_reset;
-
-               /*
-                * If this is not the first failed attempt to prepare, we
-                * decide to proceed anyway.
-                *
-                * By doing so we risk context corruption and, on some gens
-                * (kbl), a possible system hang if the reset happens during
-                * active batchbuffer execution.
-                *
-                * We would rather take context corruption than a failed
-                * reset with a wedged driver/GPU. The active batchbuffer
-                * case should be covered by the i915_stop_engines() call we
-                * make before the reset.
-                */
-       }
-
-       ret = reset_engines(dev_priv, engine_mask, retry);
-
-skip_reset:
-       for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
-               gen8_engine_reset_cancel(engine);
-
-       return ret;
-}
-
-typedef int (*reset_func)(struct drm_i915_private *,
-                         unsigned int engine_mask, unsigned int retry);
-
-static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
-{
-       if (!i915_modparams.reset)
-               return NULL;
-
-       if (INTEL_GEN(dev_priv) >= 8)
-               return gen8_reset_engines;
-       else if (INTEL_GEN(dev_priv) >= 6)
-               return gen6_reset_engines;
-       else if (IS_GEN5(dev_priv))
-               return ironlake_do_reset;
-       else if (IS_G4X(dev_priv))
-               return g4x_do_reset;
-       else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
-               return g33_do_reset;
-       else if (INTEL_GEN(dev_priv) >= 3)
-               return i915_do_reset;
-       else
-               return NULL;
-}
-
-int intel_gpu_reset(struct drm_i915_private *dev_priv,
-                   const unsigned int engine_mask)
-{
-       reset_func reset = intel_get_gpu_reset(dev_priv);
-       unsigned int retry;
-       int ret;
-
-       GEM_BUG_ON(!engine_mask);
-
-       /*
-        * We want to perform per-engine reset from atomic context (e.g.
-        * softirq), which imposes the constraint that we cannot sleep.
-        * However, experience suggests that spending a bit of time waiting
-        * for a reset helps in various cases, so for a full-device reset
-        * we apply the opposite rule and wait if we want to. As we should
-        * always follow up a failed per-engine reset with a full device reset,
-        * being a little faster, stricter and more error prone for the
-        * atomic case seems an acceptable compromise.
-        *
-        * Unfortunately this leads to a bimodal routine, whereas the goal was
-        * to have a single reset function that worked for resetting any
-        * number of engines simultaneously.
-        */
-       might_sleep_if(engine_mask == ALL_ENGINES);
-
-       /*
-        * If the power well sleeps during the reset, the reset
-        * request may be dropped and never completes (causing -EIO).
-        */
-       intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
-       for (retry = 0; retry < 3; retry++) {
-
-               /*
-                * We stop the engines, otherwise we might get a failed reset
-                * and a dead GPU (on elk). Also, a GPU as modern as kbl can
-                * suffer a system hang if a batchbuffer is progressing when
-                * the reset is issued, regardless of the READY_TO_RESET ack.
-                * Thus we assume it is best to stop the engines on all gens
-                * where we have a GPU reset.
-                *
-                * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
-                *
-                * WaMediaResetMainRingCleanup:ctg,elk (presumably)
-                *
-                * FIXME: Wa for more modern gens needs to be validated
-                */
-               i915_stop_engines(dev_priv, engine_mask);
-
-               ret = -ENODEV;
-               if (reset) {
-                       ret = reset(dev_priv, engine_mask, retry);
-                       GEM_TRACE("engine_mask=%x, ret=%d, retry=%d\n",
-                                 engine_mask, ret, retry);
-               }
-               if (ret != -ETIMEDOUT || engine_mask != ALL_ENGINES)
-                       break;
-
-               cond_resched();
-       }
-       intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-
-       return ret;
-}
-
-bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
-{
-       return intel_get_gpu_reset(dev_priv) != NULL;
-}
-
-bool intel_has_reset_engine(struct drm_i915_private *dev_priv)
-{
-       return (dev_priv->info.has_reset_engine &&
-               i915_modparams.reset >= 2);
-}
-
-int intel_reset_guc(struct drm_i915_private *dev_priv)
-{
-       u32 guc_domain = INTEL_GEN(dev_priv) >= 11 ? GEN11_GRDOM_GUC :
-                                                    GEN9_GRDOM_GUC;
-       int ret;
-
-       GEM_BUG_ON(!HAS_GUC(dev_priv));
-
-       intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
-       ret = gen6_hw_domain_reset(dev_priv, guc_domain);
-       intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-
-       return ret;
-}
-
 bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
 {
        return check_for_unclaimed_mmio(dev_priv);
@@ -2321,7 +1874,7 @@ intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
        } else if (INTEL_GEN(dev_priv) >= 6) {
                fw_domains = __gen6_reg_read_fw_domains(offset);
        } else {
-               WARN_ON(!IS_GEN(dev_priv, 2, 5));
+               WARN_ON(!IS_GEN_RANGE(dev_priv, 2, 5));
                fw_domains = 0;
        }
 
@@ -2341,12 +1894,12 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
                fw_domains = __gen11_fwtable_reg_write_fw_domains(offset);
        } else if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
                fw_domains = __fwtable_reg_write_fw_domains(offset);
-       } else if (IS_GEN8(dev_priv)) {
+       } else if (IS_GEN(dev_priv, 8)) {
                fw_domains = __gen8_reg_write_fw_domains(offset);
-       } else if (IS_GEN(dev_priv, 6, 7)) {
+       } else if (IS_GEN_RANGE(dev_priv, 6, 7)) {
                fw_domains = FORCEWAKE_RENDER;
        } else {
-               WARN_ON(!IS_GEN(dev_priv, 2, 5));
+               WARN_ON(!IS_GEN_RANGE(dev_priv, 2, 5));
                fw_domains = 0;
        }
 
index c56ba0e04044aea9bf68ecdd9fabfa4453b3bf64..23abf03736e7262cba70668e7eba9217fbeb3a75 100644 (file)
@@ -6,7 +6,6 @@
  *         Manasi Navare <manasi.d.navare@intel.com>
  */
 
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 #include "intel_drv.h"
@@ -1083,6 +1082,6 @@ void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
        I915_WRITE(dss_ctl2_reg, dss_ctl2_val);
 
        /* Disable Power wells for VDSC/joining */
-       intel_display_power_put(dev_priv,
-                               intel_dsc_power_domain(old_crtc_state));
+       intel_display_power_put_unchecked(dev_priv,
+                                         intel_dsc_power_domain(old_crtc_state));
 }
index 92cb82dd0c0731fdb09c3d51a66b089768b1c7b0..f82a415ea2ba383397905cfccea8d28efa6add3b 100644 (file)
@@ -130,11 +130,11 @@ static inline int check_hw_restriction(struct drm_i915_private *i915,
 {
        int err = 0;
 
-       if (IS_GEN9(i915))
+       if (IS_GEN(i915, 9))
                err = gen9_check_dword_gap(guc_wopcm_base, guc_wopcm_size);
 
        if (!err &&
-           (IS_GEN9(i915) || IS_CNL_REVID(i915, CNL_REVID_A0, CNL_REVID_A0)))
+           (IS_GEN(i915, 9) || IS_CNL_REVID(i915, CNL_REVID_A0, CNL_REVID_A0)))
                err = gen9_check_huc_fw_fits(guc_wopcm_size, huc_fw_size);
 
        return err;
@@ -163,7 +163,7 @@ int intel_wopcm_init(struct intel_wopcm *wopcm)
        u32 guc_wopcm_rsvd;
        int err;
 
-       if (!USES_GUC(dev_priv))
+       if (!USES_GUC(i915))
                return 0;
 
        GEM_BUG_ON(!wopcm->size);
index 4f41e326f3f3fd77e0a8d519e1f33933a5a0d92b..15f4a6dee5aad20e80aae26435a3e7baeb435132 100644 (file)
@@ -142,7 +142,8 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
 }
 
 static void
-__wa_add(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, u32 val)
+wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
+                  u32 val)
 {
        struct i915_wa wa = {
                .reg = reg,
@@ -153,16 +154,32 @@ __wa_add(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, u32 val)
        _wa_add(wal, &wa);
 }
 
-#define WA_REG(addr, mask, val) __wa_add(wal, (addr), (mask), (val))
+static void
+wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+{
+       wa_write_masked_or(wal, reg, val, _MASKED_BIT_ENABLE(val));
+}
+
+static void
+wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+{
+       wa_write_masked_or(wal, reg, ~0, val);
+}
+
+static void
+wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+{
+       wa_write_masked_or(wal, reg, val, val);
+}
 
 #define WA_SET_BIT_MASKED(addr, mask) \
-       WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
+       wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_ENABLE(mask))
 
 #define WA_CLR_BIT_MASKED(addr, mask) \
-       WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
+       wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_DISABLE(mask))
 
 #define WA_SET_FIELD_MASKED(addr, mask, value) \
-       WA_REG(addr, (mask), _MASKED_FIELD(mask, value))
+       wa_write_masked_or(wal, (addr), (mask), _MASKED_FIELD((mask), (value)))
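Every helper and compatibility macro above funnels into wa_write_masked_or(), which records the register, mask and value on the list so a workaround can later be re-applied and verified by readback. A hypothetical use, with invented names:

	/* Hypothetical call: FOO_CHICKEN and FOO_WA_BIT are made-up names.
	 * This records mask = FOO_WA_BIT and val = _MASKED_BIT_ENABLE(FOO_WA_BIT),
	 * so applying writes the masked enable and verifying checks the bit. */
	wa_masked_en(wal, FOO_CHICKEN, FOO_WA_BIT);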
 
 static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine)
 {
@@ -366,7 +383,7 @@ static void skl_tune_iz_hashing(struct intel_engine_cs *engine)
                 * Only consider slices where one, and only one, subslice has 7
                 * EUs
                 */
-               if (!is_power_of_2(INTEL_INFO(i915)->sseu.subslice_7eu[i]))
+               if (!is_power_of_2(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]))
                        continue;
 
                /*
@@ -375,7 +392,7 @@ static void skl_tune_iz_hashing(struct intel_engine_cs *engine)
                 *
                 * ->    0 <= ss <= 3;
                 */
-               ss = ffs(INTEL_INFO(i915)->sseu.subslice_7eu[i]) - 1;
+               ss = ffs(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]) - 1;
                vals[i] = 3 - ss;
        }
 
@@ -532,6 +549,12 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine)
        if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
                WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
                                  GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);
+
+       /* WaEnableFloatBlendOptimization:icl */
+       wa_write_masked_or(wal,
+                          GEN10_CACHE_MODE_SS,
+                          0, /* write-only, so skip validation */
+                          _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE));
 }
 
 void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
@@ -603,46 +626,8 @@ int intel_engine_emit_ctx_wa(struct i915_request *rq)
 }
 
 static void
-wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+gen9_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-       struct i915_wa wa = {
-               .reg = reg,
-               .mask = val,
-               .val = _MASKED_BIT_ENABLE(val)
-       };
-
-       _wa_add(wal, &wa);
-}
-
-static void
-wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
-                  u32 val)
-{
-       struct i915_wa wa = {
-               .reg = reg,
-               .mask = mask,
-               .val = val
-       };
-
-       _wa_add(wal, &wa);
-}
-
-static void
-wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
-{
-       wa_write_masked_or(wal, reg, ~0, val);
-}
-
-static void
-wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
-{
-       wa_write_masked_or(wal, reg, val, val);
-}
-
-static void gen9_gt_workarounds_init(struct drm_i915_private *i915)
-{
-       struct i915_wa_list *wal = &i915->gt_wa_list;
-
        /* WaDisableKillLogic:bxt,skl,kbl */
        if (!IS_COFFEELAKE(i915))
                wa_write_or(wal,
@@ -666,11 +651,10 @@ static void gen9_gt_workarounds_init(struct drm_i915_private *i915)
                    BDW_DISABLE_HDC_INVALIDATION);
 }
 
-static void skl_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+skl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-       struct i915_wa_list *wal = &i915->gt_wa_list;
-
-       gen9_gt_workarounds_init(i915);
+       gen9_gt_workarounds_init(i915, wal);
 
        /* WaDisableGafsUnitClkGating:skl */
        wa_write_or(wal,
@@ -684,11 +668,10 @@ static void skl_gt_workarounds_init(struct drm_i915_private *i915)
                            GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void bxt_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+bxt_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-       struct i915_wa_list *wal = &i915->gt_wa_list;
-
-       gen9_gt_workarounds_init(i915);
+       gen9_gt_workarounds_init(i915, wal);
 
        /* WaInPlaceDecompressionHang:bxt */
        wa_write_or(wal,
@@ -696,11 +679,10 @@ static void bxt_gt_workarounds_init(struct drm_i915_private *i915)
                    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void kbl_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+kbl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-       struct i915_wa_list *wal = &i915->gt_wa_list;
-
-       gen9_gt_workarounds_init(i915);
+       gen9_gt_workarounds_init(i915, wal);
 
        /* WaDisableDynamicCreditSharing:kbl */
        if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
@@ -719,16 +701,16 @@ static void kbl_gt_workarounds_init(struct drm_i915_private *i915)
                    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void glk_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+glk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-       gen9_gt_workarounds_init(i915);
+       gen9_gt_workarounds_init(i915, wal);
 }
 
-static void cfl_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+cfl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-       struct i915_wa_list *wal = &i915->gt_wa_list;
-
-       gen9_gt_workarounds_init(i915);
+       gen9_gt_workarounds_init(i915, wal);
 
        /* WaDisableGafsUnitClkGating:cfl */
        wa_write_or(wal,
@@ -741,10 +723,10 @@ static void cfl_gt_workarounds_init(struct drm_i915_private *i915)
                    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void wa_init_mcr(struct drm_i915_private *dev_priv)
+static void
+wa_init_mcr(struct drm_i915_private *dev_priv, struct i915_wa_list *wal)
 {
-       const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
-       struct i915_wa_list *wal = &dev_priv->gt_wa_list;
+       const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
        u32 mcr_slice_subslice_mask;
 
        /*
@@ -804,11 +786,10 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
                           intel_calculate_mcr_s_ss_select(dev_priv));
 }
 
-static void cnl_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+cnl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-       struct i915_wa_list *wal = &i915->gt_wa_list;
-
-       wa_init_mcr(i915);
+       wa_init_mcr(i915, wal);
 
        /* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
        if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
@@ -822,11 +803,10 @@ static void cnl_gt_workarounds_init(struct drm_i915_private *i915)
                    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void icl_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-       struct i915_wa_list *wal = &i915->gt_wa_list;
-
-       wa_init_mcr(i915);
+       wa_init_mcr(i915, wal);
 
        /* WaInPlaceDecompressionHang:icl */
        wa_write_or(wal,
@@ -879,12 +859,9 @@ static void icl_gt_workarounds_init(struct drm_i915_private *i915)
                    GAMT_CHKN_DISABLE_L3_COH_PIPE);
 }
 
-void intel_gt_init_workarounds(struct drm_i915_private *i915)
+static void
+gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-       struct i915_wa_list *wal = &i915->gt_wa_list;
-
-       wa_init_start(wal, "GT");
-
        if (INTEL_GEN(i915) < 8)
                return;
        else if (IS_BROADWELL(i915))
@@ -892,22 +869,29 @@ void intel_gt_init_workarounds(struct drm_i915_private *i915)
        else if (IS_CHERRYVIEW(i915))
                return;
        else if (IS_SKYLAKE(i915))
-               skl_gt_workarounds_init(i915);
+               skl_gt_workarounds_init(i915, wal);
        else if (IS_BROXTON(i915))
-               bxt_gt_workarounds_init(i915);
+               bxt_gt_workarounds_init(i915, wal);
        else if (IS_KABYLAKE(i915))
-               kbl_gt_workarounds_init(i915);
+               kbl_gt_workarounds_init(i915, wal);
        else if (IS_GEMINILAKE(i915))
-               glk_gt_workarounds_init(i915);
+               glk_gt_workarounds_init(i915, wal);
        else if (IS_COFFEELAKE(i915))
-               cfl_gt_workarounds_init(i915);
+               cfl_gt_workarounds_init(i915, wal);
        else if (IS_CANNONLAKE(i915))
-               cnl_gt_workarounds_init(i915);
+               cnl_gt_workarounds_init(i915, wal);
        else if (IS_ICELAKE(i915))
-               icl_gt_workarounds_init(i915);
+               icl_gt_workarounds_init(i915, wal);
        else
                MISSING_CASE(INTEL_GEN(i915));
+}
 
+void intel_gt_init_workarounds(struct drm_i915_private *i915)
+{
+       struct i915_wa_list *wal = &i915->gt_wa_list;
+
+       wa_init_start(wal, "GT");
+       gt_init_workarounds(i915, wal);
        wa_init_finish(wal);
 }
 
@@ -955,8 +939,6 @@ wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal)
 
        intel_uncore_forcewake_put__locked(dev_priv, fw);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
-
-       DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
 }
 
 void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv)
@@ -1126,14 +1108,12 @@ void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
        for (; i < RING_MAX_NONPRIV_SLOTS; i++)
                I915_WRITE(RING_FORCE_TO_NONPRIV(base, i),
                           i915_mmio_reg_offset(RING_NOPID(base)));
-
-       DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
 }
 
-static void rcs_engine_wa_init(struct intel_engine_cs *engine)
+static void
+rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 {
        struct drm_i915_private *i915 = engine->i915;
-       struct i915_wa_list *wal = &engine->wa_list;
 
        if (IS_ICELAKE(i915)) {
                /* This is not a Wa. Enable for better image quality */
@@ -1190,7 +1170,7 @@ static void rcs_engine_wa_init(struct intel_engine_cs *engine)
                                    GEN7_DISABLE_SAMPLER_PREFETCH);
        }
 
-       if (IS_GEN9(i915) || IS_CANNONLAKE(i915)) {
+       if (IS_GEN(i915, 9) || IS_CANNONLAKE(i915)) {
                /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,cnl */
                wa_masked_en(wal,
                             GEN7_FF_SLICE_CS_CHICKEN1,
@@ -1211,7 +1191,7 @@ static void rcs_engine_wa_init(struct intel_engine_cs *engine)
                             GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
        }
 
-       if (IS_GEN9(i915)) {
+       if (IS_GEN(i915, 9)) {
                /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
                wa_masked_en(wal,
                             GEN9_CSFE_CHICKEN1_RCS,
@@ -1237,10 +1217,10 @@ static void rcs_engine_wa_init(struct intel_engine_cs *engine)
        }
 }
 
-static void xcs_engine_wa_init(struct intel_engine_cs *engine)
+static void
+xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 {
        struct drm_i915_private *i915 = engine->i915;
-       struct i915_wa_list *wal = &engine->wa_list;
 
        /* WaKBLVECSSemaphoreWaitPoll:kbl */
        if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
@@ -1250,6 +1230,18 @@ static void xcs_engine_wa_init(struct intel_engine_cs *engine)
        }
 }
 
+static void
+engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+{
+       if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8))
+               return;
+
+       if (engine->id == RCS)
+               rcs_engine_wa_init(engine, wal);
+       else
+               xcs_engine_wa_init(engine, wal);
+}
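A note on the I915_SELFTEST_ONLY() guard, based on a reading of the macro rather than anything in this hunk: it evaluates its argument only when selftests are built in and folds to zero otherwise, so the early return above exists for the mock devices the selftests create:

/* Assumed shape of the guard (illustrative, not quoted from i915): */
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#define I915_SELFTEST_ONLY(x) unlikely(x)
#else
#define I915_SELFTEST_ONLY(x) 0
#endif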
+
 void intel_engine_init_workarounds(struct intel_engine_cs *engine)
 {
        struct i915_wa_list *wal = &engine->wa_list;
@@ -1258,12 +1250,7 @@ void intel_engine_init_workarounds(struct intel_engine_cs *engine)
                return;
 
        wa_init_start(wal, engine->name);
-
-       if (engine->id == RCS)
-               rcs_engine_wa_init(engine);
-       else
-               xcs_engine_wa_init(engine);
-
+       engine_init_workarounds(engine, wal);
        wa_init_finish(wal);
 }
 
@@ -1273,11 +1260,5 @@ void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-static bool intel_engine_verify_workarounds(struct intel_engine_cs *engine,
-                                           const char *from)
-{
-       return wa_list_verify(engine->i915, &engine->wa_list, from);
-}
-
 #include "selftests/intel_workarounds.c"
 #endif
index 26c065c8d2c0a7e3b550dc96fa94ee611daae296..a9a2fa35876fe5537db2e4693334097aae4f7def 100644 (file)
@@ -972,7 +972,6 @@ static int gpu_write(struct i915_vma *vma,
 {
        struct i915_request *rq;
        struct i915_vma *batch;
-       int flags = 0;
        int err;
 
        GEM_BUG_ON(!intel_engine_can_store_dword(engine));
@@ -981,14 +980,14 @@ static int gpu_write(struct i915_vma *vma,
        if (err)
                return err;
 
-       rq = i915_request_alloc(engine, ctx);
-       if (IS_ERR(rq))
-               return PTR_ERR(rq);
-
        batch = gpu_write_dw(vma, dword * sizeof(u32), value);
-       if (IS_ERR(batch)) {
-               err = PTR_ERR(batch);
-               goto err_request;
+       if (IS_ERR(batch))
+               return PTR_ERR(batch);
+
+       rq = i915_request_alloc(engine, ctx);
+       if (IS_ERR(rq)) {
+               err = PTR_ERR(rq);
+               goto err_batch;
        }
 
        err = i915_vma_move_to_active(batch, rq, 0);
@@ -996,21 +995,21 @@ static int gpu_write(struct i915_vma *vma,
                goto err_request;
 
        i915_gem_object_set_active_reference(batch->obj);
-       i915_vma_unpin(batch);
-       i915_vma_close(batch);
 
-       err = engine->emit_bb_start(rq,
-                                   batch->node.start, batch->node.size,
-                                   flags);
+       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
        if (err)
                goto err_request;
 
-       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+       err = engine->emit_bb_start(rq,
+                                   batch->node.start, batch->node.size,
+                                   0);
+err_request:
        if (err)
                i915_request_skip(rq, err);
-
-err_request:
        i915_request_add(rq);
+err_batch:
+       i915_vma_unpin(batch);
+       i915_vma_close(batch);
 
        return err;
 }
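The reshuffle above converts gpu_write() to the standard fallthrough-cleanup shape: acquire the batch before the request, and let the labels run on success as well as failure so each resource is released exactly once, in reverse order of acquisition. A generic sketch of that shape, with placeholder names:

	/* Placeholder names throughout; only the control flow matters. */
	b = acquire_batch();
	if (IS_ERR(b))
		return PTR_ERR(b);

	rq = acquire_request();
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_batch;
	}

	err = prepare(rq, b);
	if (err)
		goto err_request;

	err = submit(rq, b);
err_request:
	if (err)
		skip_request(rq, err);
	add_request(rq);		/* runs on success and failure alike */
err_batch:
	release_batch(b);		/* unpin + close, runs regardless */
	return err;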
@@ -1450,7 +1449,7 @@ static int igt_ppgtt_pin_update(void *arg)
         * huge-gtt-pages.
         */
 
-       if (!HAS_FULL_48BIT_PPGTT(dev_priv)) {
+       if (!ppgtt || !i915_vm_is_48bit(&ppgtt->vm)) {
                pr_info("48b PPGTT not supported, skipping\n");
                return 0;
        }
@@ -1703,7 +1702,6 @@ int i915_gem_huge_page_mock_selftests(void)
        };
        struct drm_i915_private *dev_priv;
        struct i915_hw_ppgtt *ppgtt;
-       struct pci_dev *pdev;
        int err;
 
        dev_priv = mock_gem_device();
@@ -1713,9 +1711,6 @@ int i915_gem_huge_page_mock_selftests(void)
        /* Pretend to be a device which supports the 48b PPGTT */
        mkwrite_device_info(dev_priv)->ppgtt = INTEL_PPGTT_FULL_4LVL;
 
-       pdev = dev_priv->drm.pdev;
-       dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39));
-
        mutex_lock(&dev_priv->drm.struct_mutex);
        ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV));
        if (IS_ERR(ppgtt)) {
@@ -1761,6 +1756,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
        };
        struct drm_file *file;
        struct i915_gem_context *ctx;
+       intel_wakeref_t wakeref;
        int err;
 
        if (!HAS_PPGTT(dev_priv)) {
@@ -1776,7 +1772,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
                return PTR_ERR(file);
 
        mutex_lock(&dev_priv->drm.struct_mutex);
-       intel_runtime_pm_get(dev_priv);
+       wakeref = intel_runtime_pm_get(dev_priv);
 
        ctx = live_context(dev_priv, file);
        if (IS_ERR(ctx)) {
@@ -1790,7 +1786,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
        err = i915_subtests(tests, ctx);
 
 out_unlock:
-       intel_runtime_pm_put(dev_priv);
+       intel_runtime_pm_put(dev_priv, wakeref);
        mutex_unlock(&dev_priv->drm.struct_mutex);
 
        mock_file_free(dev_priv, file);
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
new file mode 100644 (file)
index 0000000..337b1f9
--- /dev/null
@@ -0,0 +1,157 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "../i915_selftest.h"
+
+#include "igt_flush_test.h"
+#include "lib_sw_fence.h"
+
+struct live_active {
+       struct i915_active base;
+       bool retired;
+};
+
+static void __live_active_retire(struct i915_active *base)
+{
+       struct live_active *active = container_of(base, typeof(*active), base);
+
+       active->retired = true;
+}
+
+static int __live_active_setup(struct drm_i915_private *i915,
+                              struct live_active *active)
+{
+       struct intel_engine_cs *engine;
+       struct i915_sw_fence *submit;
+       enum intel_engine_id id;
+       unsigned int count = 0;
+       int err = 0;
+
+       submit = heap_fence_create(GFP_KERNEL);
+       if (!submit)
+               return -ENOMEM;
+
+       i915_active_init(i915, &active->base, __live_active_retire);
+       active->retired = false;
+
+       if (!i915_active_acquire(&active->base)) {
+               pr_err("First i915_active_acquire should report being idle\n");
+               err = -EINVAL;
+               goto out;
+       }
+
+       for_each_engine(engine, i915, id) {
+               struct i915_request *rq;
+
+               rq = i915_request_alloc(engine, i915->kernel_context);
+               if (IS_ERR(rq)) {
+                       err = PTR_ERR(rq);
+                       break;
+               }
+
+               err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
+                                                      submit,
+                                                      GFP_KERNEL);
+               if (err >= 0)
+                       err = i915_active_ref(&active->base,
+                                             rq->fence.context, rq);
+               i915_request_add(rq);
+               if (err) {
+                       pr_err("Failed to track active ref!\n");
+                       break;
+               }
+
+               count++;
+       }
+
+       i915_active_release(&active->base);
+       if (active->retired && count) {
+               pr_err("i915_active retired before submission!\n");
+               err = -EINVAL;
+       }
+       if (active->base.count != count) {
+               pr_err("i915_active not tracking all requests, found %d, expected %d\n",
+                      active->base.count, count);
+               err = -EINVAL;
+       }
+
+out:
+       i915_sw_fence_commit(submit);
+       heap_fence_put(submit);
+
+       return err;
+}
+
+static int live_active_wait(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       struct live_active active;
+       intel_wakeref_t wakeref;
+       int err;
+
+       /* Check that we get a callback when requests retire upon waiting */
+
+       mutex_lock(&i915->drm.struct_mutex);
+       wakeref = intel_runtime_pm_get(i915);
+
+       err = __live_active_setup(i915, &active);
+
+       i915_active_wait(&active.base);
+       if (!active.retired) {
+               pr_err("i915_active not retired after waiting!\n");
+               err = -EINVAL;
+       }
+
+       i915_active_fini(&active.base);
+       if (igt_flush_test(i915, I915_WAIT_LOCKED))
+               err = -EIO;
+
+       intel_runtime_pm_put(i915, wakeref);
+       mutex_unlock(&i915->drm.struct_mutex);
+       return err;
+}
+
+static int live_active_retire(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       struct live_active active;
+       intel_wakeref_t wakeref;
+       int err;
+
+       /* Check that we get a callback when requests are indirectly retired */
+
+       mutex_lock(&i915->drm.struct_mutex);
+       wakeref = intel_runtime_pm_get(i915);
+
+       err = __live_active_setup(i915, &active);
+
+       /* waits for & retires all requests */
+       if (igt_flush_test(i915, I915_WAIT_LOCKED))
+               err = -EIO;
+
+       if (!active.retired) {
+               pr_err("i915_active not retired after flushing!\n");
+               err = -EINVAL;
+       }
+
+       i915_active_fini(&active.base);
+       intel_runtime_pm_put(i915, wakeref);
+       mutex_unlock(&i915->drm.struct_mutex);
+       return err;
+}
+
+int i915_active_live_selftests(struct drm_i915_private *i915)
+{
+       static const struct i915_subtest tests[] = {
+               SUBTEST(live_active_wait),
+               SUBTEST(live_active_retire),
+       };
+
+       if (i915_terminally_wedged(&i915->gpu_error))
+               return 0;
+
+       return i915_subtests(tests, i915);
+}
index d0aa19d176536c0f2d4f273be4c113d472de89c5..e77b7ed449ae8ec5380ffbcd18184056c6757efa 100644 (file)
@@ -16,9 +16,10 @@ static int switch_to_context(struct drm_i915_private *i915,
 {
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
+       intel_wakeref_t wakeref;
        int err = 0;
 
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
        for_each_engine(engine, i915, id) {
                struct i915_request *rq;
@@ -32,7 +33,7 @@ static int switch_to_context(struct drm_i915_private *i915,
                i915_request_add(rq);
        }
 
-       intel_runtime_pm_put(i915);
+       intel_runtime_pm_put(i915, wakeref);
 
        return err;
 }
@@ -65,7 +66,9 @@ static void trash_stolen(struct drm_i915_private *i915)
 
 static void simulate_hibernate(struct drm_i915_private *i915)
 {
-       intel_runtime_pm_get(i915);
+       intel_wakeref_t wakeref;
+
+       wakeref = intel_runtime_pm_get(i915);
 
        /*
         * As a final sting in the tail, invalidate stolen. Under a real S4,
@@ -76,7 +79,7 @@ static void simulate_hibernate(struct drm_i915_private *i915)
         */
        trash_stolen(i915);
 
-       intel_runtime_pm_put(i915);
+       intel_runtime_pm_put(i915, wakeref);
 }
 
 static int pm_prepare(struct drm_i915_private *i915)
@@ -93,39 +96,39 @@ static int pm_prepare(struct drm_i915_private *i915)
 
 static void pm_suspend(struct drm_i915_private *i915)
 {
-       intel_runtime_pm_get(i915);
-
-       i915_gem_suspend_gtt_mappings(i915);
-       i915_gem_suspend_late(i915);
+       intel_wakeref_t wakeref;
 
-       intel_runtime_pm_put(i915);
+       with_intel_runtime_pm(i915, wakeref) {
+               i915_gem_suspend_gtt_mappings(i915);
+               i915_gem_suspend_late(i915);
+       }
 }
 
 static void pm_hibernate(struct drm_i915_private *i915)
 {
-       intel_runtime_pm_get(i915);
+       intel_wakeref_t wakeref;
 
-       i915_gem_suspend_gtt_mappings(i915);
+       with_intel_runtime_pm(i915, wakeref) {
+               i915_gem_suspend_gtt_mappings(i915);
 
-       i915_gem_freeze(i915);
-       i915_gem_freeze_late(i915);
-
-       intel_runtime_pm_put(i915);
+               i915_gem_freeze(i915);
+               i915_gem_freeze_late(i915);
+       }
 }
 
 static void pm_resume(struct drm_i915_private *i915)
 {
+       intel_wakeref_t wakeref;
+
        /*
         * Both suspend and hibernate follow the same wakeup path and assume
         * that runtime-pm just works.
         */
-       intel_runtime_pm_get(i915);
-
-       intel_engines_sanitize(i915);
-       i915_gem_sanitize(i915);
-       i915_gem_resume(i915);
-
-       intel_runtime_pm_put(i915);
+       with_intel_runtime_pm(i915, wakeref) {
+               intel_engines_sanitize(i915, false);
+               i915_gem_sanitize(i915);
+               i915_gem_resume(i915);
+       }
 }
 
 static int igt_gem_suspend(void *arg)
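Where an entire block runs under one wakeref, the series uses the scoped form instead. At the time of this series the helper is defined along these lines (a sketch; intel_drv.h in the tree is authoritative):

        #define with_intel_runtime_pm(i915, wf) \
                for ((wf) = intel_runtime_pm_get(i915); (wf); \
                     intel_runtime_pm_put((i915), (wf)), (wf) = 0)

That is, the braced body executes exactly once with the wakeref held, and the reference is dropped when the loop exits.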
index f7392c1ffe755cf3011f7457fac0294e2e41201d..fd89a5a33c1a0b1719c8bbbeb354c55ec498484f 100644 (file)
@@ -279,6 +279,7 @@ static int igt_gem_coherency(void *arg)
        struct drm_i915_private *i915 = arg;
        const struct igt_coherency_mode *read, *write, *over;
        struct drm_i915_gem_object *obj;
+       intel_wakeref_t wakeref;
        unsigned long count, n;
        u32 *offsets, *values;
        int err = 0;
@@ -298,7 +299,7 @@ static int igt_gem_coherency(void *arg)
        values = offsets + ncachelines;
 
        mutex_lock(&i915->drm.struct_mutex);
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
        for (over = igt_coherency_mode; over->name; over++) {
                if (!over->set)
                        continue;
@@ -376,7 +377,7 @@ static int igt_gem_coherency(void *arg)
                }
        }
 unlock:
-       intel_runtime_pm_put(i915);
+       intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        kfree(offsets);
        return err;
index 7d82043aff1099c82a35fde7fe0bce2f4b454bcb..d00d0bb07784229a323145336803802479c6ebb9 100644 (file)
 
 #include <linux/prime_numbers.h>
 
+#include "../i915_reset.h"
 #include "../i915_selftest.h"
 #include "i915_random.h"
 #include "igt_flush_test.h"
+#include "igt_live_test.h"
+#include "igt_reset.h"
+#include "igt_spinner.h"
 
 #include "mock_drm.h"
 #include "mock_gem_device.h"
 
 #define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
 
-struct live_test {
-       struct drm_i915_private *i915;
-       const char *func;
-       const char *name;
-
-       unsigned int reset_global;
-       unsigned int reset_engine[I915_NUM_ENGINES];
-};
-
-static int begin_live_test(struct live_test *t,
-                          struct drm_i915_private *i915,
-                          const char *func,
-                          const char *name)
-{
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
-       int err;
-
-       t->i915 = i915;
-       t->func = func;
-       t->name = name;
-
-       err = i915_gem_wait_for_idle(i915,
-                                    I915_WAIT_LOCKED,
-                                    MAX_SCHEDULE_TIMEOUT);
-       if (err) {
-               pr_err("%s(%s): failed to idle before, with err=%d!",
-                      func, name, err);
-               return err;
-       }
-
-       i915->gpu_error.missed_irq_rings = 0;
-       t->reset_global = i915_reset_count(&i915->gpu_error);
-
-       for_each_engine(engine, i915, id)
-               t->reset_engine[id] =
-                       i915_reset_engine_count(&i915->gpu_error, engine);
-
-       return 0;
-}
-
-static int end_live_test(struct live_test *t)
-{
-       struct drm_i915_private *i915 = t->i915;
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
-
-       if (igt_flush_test(i915, I915_WAIT_LOCKED))
-               return -EIO;
-
-       if (t->reset_global != i915_reset_count(&i915->gpu_error)) {
-               pr_err("%s(%s): GPU was reset %d times!\n",
-                      t->func, t->name,
-                      i915_reset_count(&i915->gpu_error) - t->reset_global);
-               return -EIO;
-       }
-
-       for_each_engine(engine, i915, id) {
-               if (t->reset_engine[id] ==
-                   i915_reset_engine_count(&i915->gpu_error, engine))
-                       continue;
-
-               pr_err("%s(%s): engine '%s' was reset %d times!\n",
-                      t->func, t->name, engine->name,
-                      i915_reset_engine_count(&i915->gpu_error, engine) -
-                      t->reset_engine[id]);
-               return -EIO;
-       }
-
-       if (i915->gpu_error.missed_irq_rings) {
-               pr_err("%s(%s): Missed interrupts on engines %lx\n",
-                      t->func, t->name, i915->gpu_error.missed_irq_rings);
-               return -EIO;
-       }
-
-       return 0;
-}
-
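The live_test helpers deleted above were extracted into igt_live_test.[ch] with the same semantics: begin idles the GPU and snapshots the global and per-engine reset counts plus the missed-interrupt mask, and end reports -EIO if any of them changed. Typical usage, matching the converted callers below:

        struct igt_live_test t;

        err = igt_live_test_begin(&t, i915, __func__, engine->name);
        if (err)
                goto out;

        /* ... submit and wait on test work ... */

        err = igt_live_test_end(&t); /* -EIO on stray reset or missed irq */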
 static int live_nop_switch(void *arg)
 {
        const unsigned int nctx = 1024;
@@ -119,8 +45,9 @@ static int live_nop_switch(void *arg)
        struct intel_engine_cs *engine;
        struct i915_gem_context **ctx;
        enum intel_engine_id id;
+       intel_wakeref_t wakeref;
+       struct igt_live_test t;
        struct drm_file *file;
-       struct live_test t;
        unsigned long n;
        int err = -ENODEV;
 
@@ -140,7 +67,7 @@ static int live_nop_switch(void *arg)
                return PTR_ERR(file);
 
        mutex_lock(&i915->drm.struct_mutex);
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
        ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
        if (!ctx) {
@@ -184,7 +111,7 @@ static int live_nop_switch(void *arg)
                pr_info("Populated %d contexts on %s in %lluns\n",
                        nctx, engine->name, ktime_to_ns(times[1] - times[0]));
 
-               err = begin_live_test(&t, i915, __func__, engine->name);
+               err = igt_live_test_begin(&t, i915, __func__, engine->name);
                if (err)
                        goto out_unlock;
 
@@ -232,7 +159,7 @@ static int live_nop_switch(void *arg)
                                break;
                }
 
-               err = end_live_test(&t);
+               err = igt_live_test_end(&t);
                if (err)
                        goto out_unlock;
 
@@ -243,7 +170,7 @@ static int live_nop_switch(void *arg)
        }
 
 out_unlock:
-       intel_runtime_pm_put(i915);
+       intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        mock_file_free(i915, file);
        return err;
@@ -553,10 +480,10 @@ static int igt_ctx_exec(void *arg)
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_object *obj = NULL;
        unsigned long ncontexts, ndwords, dw;
+       struct igt_live_test t;
        struct drm_file *file;
        IGT_TIMEOUT(end_time);
        LIST_HEAD(objects);
-       struct live_test t;
        int err = -ENODEV;
 
        /*
@@ -574,7 +501,7 @@ static int igt_ctx_exec(void *arg)
 
        mutex_lock(&i915->drm.struct_mutex);
 
-       err = begin_live_test(&t, i915, __func__, "");
+       err = igt_live_test_begin(&t, i915, __func__, "");
        if (err)
                goto out_unlock;
 
@@ -593,6 +520,8 @@ static int igt_ctx_exec(void *arg)
                }
 
                for_each_engine(engine, i915, id) {
+                       intel_wakeref_t wakeref;
+
                        if (!engine->context_size)
                                continue; /* No logical context support in HW */
 
@@ -607,9 +536,9 @@ static int igt_ctx_exec(void *arg)
                                }
                        }
 
-                       intel_runtime_pm_get(i915);
-                       err = gpu_fill(obj, ctx, engine, dw);
-                       intel_runtime_pm_put(i915);
+                       err = 0;
+                       with_intel_runtime_pm(i915, wakeref)
+                               err = gpu_fill(obj, ctx, engine, dw);
                        if (err) {
                                pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
                                       ndwords, dw, max_dwords(obj),
@@ -627,7 +556,7 @@ static int igt_ctx_exec(void *arg)
                ncontexts++;
        }
        pr_info("Submitted %lu contexts (across %u engines), filling %lu dwords\n",
-               ncontexts, INTEL_INFO(i915)->num_rings, ndwords);
+               ncontexts, RUNTIME_INFO(i915)->num_rings, ndwords);
 
        dw = 0;
        list_for_each_entry(obj, &objects, st_link) {
@@ -642,7 +571,7 @@ static int igt_ctx_exec(void *arg)
        }
 
 out_unlock:
-       if (end_live_test(&t))
+       if (igt_live_test_end(&t))
                err = -EIO;
        mutex_unlock(&i915->drm.struct_mutex);
 
@@ -650,6 +579,469 @@ out_unlock:
        return err;
 }
 
+static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
+{
+       struct drm_i915_gem_object *obj;
+       u32 *cmd;
+       int err;
+
+       if (INTEL_GEN(vma->vm->i915) < 8)
+               return ERR_PTR(-EINVAL);
+
+       obj = i915_gem_object_create_internal(vma->vm->i915, PAGE_SIZE);
+       if (IS_ERR(obj))
+               return ERR_CAST(obj);
+
+       cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+       if (IS_ERR(cmd)) {
+               err = PTR_ERR(cmd);
+               goto err;
+       }
+
+       *cmd++ = MI_STORE_REGISTER_MEM_GEN8;
+       *cmd++ = i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE);
+       *cmd++ = lower_32_bits(vma->node.start);
+       *cmd++ = upper_32_bits(vma->node.start);
+       *cmd = MI_BATCH_BUFFER_END;
+
+       i915_gem_object_unpin_map(obj);
+
+       err = i915_gem_object_set_to_gtt_domain(obj, false);
+       if (err)
+               goto err;
+
+       vma = i915_vma_instance(obj, vma->vm, NULL);
+       if (IS_ERR(vma)) {
+               err = PTR_ERR(vma);
+               goto err;
+       }
+
+       err = i915_vma_pin(vma, 0, 0, PIN_USER);
+       if (err)
+               goto err;
+
+       return vma;
+
+err:
+       i915_gem_object_put(obj);
+       return ERR_PTR(err);
+}
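The batch constructed above is a single register snapshot: it stores GEN8_R_PWR_CLK_STATE (the RPCS register) into the start of the result object and terminates. Dword by dword:

        /*
         * dw0: MI_STORE_REGISTER_MEM_GEN8
         * dw1: i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE)
         * dw2: lower_32_bits(vma->node.start)   destination address, low
         * dw3: upper_32_bits(vma->node.start)   destination address, high
         * dw4: MI_BATCH_BUFFER_END
         */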
+
+static int
+emit_rpcs_query(struct drm_i915_gem_object *obj,
+               struct i915_gem_context *ctx,
+               struct intel_engine_cs *engine,
+               struct i915_request **rq_out)
+{
+       struct i915_request *rq;
+       struct i915_vma *batch;
+       struct i915_vma *vma;
+       int err;
+
+       GEM_BUG_ON(!intel_engine_can_store_dword(engine));
+
+       vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+       if (IS_ERR(vma))
+               return PTR_ERR(vma);
+
+       err = i915_gem_object_set_to_gtt_domain(obj, false);
+       if (err)
+               return err;
+
+       err = i915_vma_pin(vma, 0, 0, PIN_USER);
+       if (err)
+               return err;
+
+       batch = rpcs_query_batch(vma);
+       if (IS_ERR(batch)) {
+               err = PTR_ERR(batch);
+               goto err_vma;
+       }
+
+       rq = i915_request_alloc(engine, ctx);
+       if (IS_ERR(rq)) {
+               err = PTR_ERR(rq);
+               goto err_batch;
+       }
+
+       err = engine->emit_bb_start(rq, batch->node.start, batch->node.size, 0);
+       if (err)
+               goto err_request;
+
+       err = i915_vma_move_to_active(batch, rq, 0);
+       if (err)
+               goto skip_request;
+
+       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+       if (err)
+               goto skip_request;
+
+       i915_gem_object_set_active_reference(batch->obj);
+       i915_vma_unpin(batch);
+       i915_vma_close(batch);
+
+       i915_vma_unpin(vma);
+
+       *rq_out = i915_request_get(rq);
+
+       i915_request_add(rq);
+
+       return 0;
+
+skip_request:
+       i915_request_skip(rq, err);
+err_request:
+       i915_request_add(rq);
+err_batch:
+       i915_vma_unpin(batch);
+err_vma:
+       i915_vma_unpin(vma);
+
+       return err;
+}
+
+#define TEST_IDLE      BIT(0)
+#define TEST_BUSY      BIT(1)
+#define TEST_RESET     BIT(2)
+
+static int
+__sseu_prepare(struct drm_i915_private *i915,
+              const char *name,
+              unsigned int flags,
+              struct i915_gem_context *ctx,
+              struct intel_engine_cs *engine,
+              struct igt_spinner **spin_out)
+{
+       int ret = 0;
+
+       if (flags & (TEST_BUSY | TEST_RESET)) {
+               struct igt_spinner *spin;
+               struct i915_request *rq;
+
+               spin = kzalloc(sizeof(*spin), GFP_KERNEL);
+               if (!spin) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+
+               ret = igt_spinner_init(spin, i915);
+               if (ret) {
+                       kfree(spin); /* don't leak the allocation on early exit */
+                       goto out;
+               }
+
+               rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
+               if (IS_ERR(rq)) {
+                       ret = PTR_ERR(rq);
+                       igt_spinner_fini(spin);
+                       kfree(spin);
+                       goto out;
+               }
+
+               i915_request_add(rq);
+
+               if (!igt_wait_for_spinner(spin, rq)) {
+                       pr_err("%s: Spinner failed to start!\n", name);
+                       igt_spinner_end(spin);
+                       igt_spinner_fini(spin);
+                       kfree(spin);
+                       ret = -ETIMEDOUT;
+                       goto out;
+               }
+
+               *spin_out = spin;
+       }
+
+out:
+       return ret;
+}
+
+static int
+__read_slice_count(struct drm_i915_private *i915,
+                  struct i915_gem_context *ctx,
+                  struct intel_engine_cs *engine,
+                  struct drm_i915_gem_object *obj,
+                  struct igt_spinner *spin,
+                  u32 *rpcs)
+{
+       struct i915_request *rq = NULL;
+       u32 s_mask, s_shift;
+       unsigned int cnt;
+       u32 *buf, val;
+       long ret;
+
+       ret = emit_rpcs_query(obj, ctx, engine, &rq);
+       if (ret)
+               return ret;
+
+       if (spin)
+               igt_spinner_end(spin);
+
+       ret = i915_request_wait(rq, I915_WAIT_LOCKED, MAX_SCHEDULE_TIMEOUT);
+       i915_request_put(rq);
+       if (ret < 0)
+               return ret;
+
+       buf = i915_gem_object_pin_map(obj, I915_MAP_WB);
+       if (IS_ERR(buf)) {
+               ret = PTR_ERR(buf);
+               return ret;
+       }
+
+       if (INTEL_GEN(i915) >= 11) {
+               s_mask = GEN11_RPCS_S_CNT_MASK;
+               s_shift = GEN11_RPCS_S_CNT_SHIFT;
+       } else {
+               s_mask = GEN8_RPCS_S_CNT_MASK;
+               s_shift = GEN8_RPCS_S_CNT_SHIFT;
+       }
+
+       val = *buf;
+       cnt = (val & s_mask) >> s_shift;
+       *rpcs = val;
+
+       i915_gem_object_unpin_map(obj);
+
+       return cnt;
+}
+
+static int
+__check_rpcs(const char *name, u32 rpcs, int slices, unsigned int expected,
+            const char *prefix, const char *suffix)
+{
+       if (slices == expected)
+               return 0;
+
+       if (slices < 0) {
+               pr_err("%s: %s read slice count failed with %d%s\n",
+                      name, prefix, slices, suffix);
+               return slices;
+       }
+
+       pr_err("%s: %s slice count %d is not %u%s\n",
+              name, prefix, slices, expected, suffix);
+
+       pr_info("RPCS=0x%x; %u%sx%u%s\n",
+               rpcs, slices,
+               (rpcs & GEN8_RPCS_S_CNT_ENABLE) ? "*" : "",
+               (rpcs & GEN8_RPCS_SS_CNT_MASK) >> GEN8_RPCS_SS_CNT_SHIFT,
+               (rpcs & GEN8_RPCS_SS_CNT_ENABLE) ? "*" : "");
+
+       return -EINVAL;
+}
+
+static int
+__sseu_finish(struct drm_i915_private *i915,
+             const char *name,
+             unsigned int flags,
+             struct i915_gem_context *ctx,
+             struct i915_gem_context *kctx,
+             struct intel_engine_cs *engine,
+             struct drm_i915_gem_object *obj,
+             unsigned int expected,
+             struct igt_spinner *spin)
+{
+       unsigned int slices =
+               hweight32(intel_device_default_sseu(i915).slice_mask);
+       u32 rpcs = 0;
+       int ret = 0;
+
+       if (flags & TEST_RESET) {
+               ret = i915_reset_engine(engine, "sseu");
+               if (ret)
+                       goto out;
+       }
+
+       ret = __read_slice_count(i915, ctx, engine, obj,
+                                flags & TEST_RESET ? NULL : spin, &rpcs);
+       ret = __check_rpcs(name, rpcs, ret, expected, "Context", "!");
+       if (ret)
+               goto out;
+
+       ret = __read_slice_count(i915, kctx, engine, obj, NULL, &rpcs);
+       ret = __check_rpcs(name, rpcs, ret, slices, "Kernel context", "!");
+
+out:
+       if (spin)
+               igt_spinner_end(spin);
+
+       if ((flags & TEST_IDLE) && ret == 0) {
+               ret = i915_gem_wait_for_idle(i915,
+                                            I915_WAIT_LOCKED,
+                                            MAX_SCHEDULE_TIMEOUT);
+               if (ret)
+                       return ret;
+
+               ret = __read_slice_count(i915, ctx, engine, obj, NULL, &rpcs);
+               ret = __check_rpcs(name, rpcs, ret, expected,
+                                  "Context", " after idle!");
+       }
+
+       return ret;
+}
+
+static int
+__sseu_test(struct drm_i915_private *i915,
+           const char *name,
+           unsigned int flags,
+           struct i915_gem_context *ctx,
+           struct intel_engine_cs *engine,
+           struct drm_i915_gem_object *obj,
+           struct intel_sseu sseu)
+{
+       struct igt_spinner *spin = NULL;
+       struct i915_gem_context *kctx;
+       int ret;
+
+       kctx = kernel_context(i915);
+       if (IS_ERR(kctx))
+               return PTR_ERR(kctx);
+
+       ret = __sseu_prepare(i915, name, flags, ctx, engine, &spin);
+       if (ret)
+               goto out;
+
+       ret = __i915_gem_context_reconfigure_sseu(ctx, engine, sseu);
+       if (ret)
+               goto out;
+
+       ret = __sseu_finish(i915, name, flags, ctx, kctx, engine, obj,
+                           hweight32(sseu.slice_mask), spin);
+
+out:
+       if (spin) {
+               igt_spinner_end(spin);
+               igt_spinner_fini(spin);
+               kfree(spin);
+       }
+
+       kernel_context_close(kctx);
+
+       return ret;
+}
+
+static int
+__igt_ctx_sseu(struct drm_i915_private *i915,
+              const char *name,
+              unsigned int flags)
+{
+       struct intel_sseu default_sseu = intel_device_default_sseu(i915);
+       struct intel_engine_cs *engine = i915->engine[RCS];
+       struct drm_i915_gem_object *obj;
+       struct i915_gem_context *ctx;
+       struct intel_sseu pg_sseu;
+       intel_wakeref_t wakeref;
+       struct drm_file *file;
+       int ret;
+
+       if (INTEL_GEN(i915) < 9)
+               return 0;
+
+       if (!RUNTIME_INFO(i915)->sseu.has_slice_pg)
+               return 0;
+
+       if (hweight32(default_sseu.slice_mask) < 2)
+               return 0;
+
+       /*
+        * Gen11 VME-friendly power-gated configuration with half of the
+        * sub-slices enabled.
+        */
+       pg_sseu = default_sseu;
+       pg_sseu.slice_mask = 1;
+       pg_sseu.subslice_mask =
+               ~(~0 << (hweight32(default_sseu.subslice_mask) / 2));
+
+       pr_info("SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n",
+               name, flags, hweight32(default_sseu.slice_mask),
+               hweight32(pg_sseu.slice_mask));
+
+       file = mock_file(i915);
+       if (IS_ERR(file))
+               return PTR_ERR(file);
+
+       if (flags & TEST_RESET)
+               igt_global_reset_lock(i915);
+
+       mutex_lock(&i915->drm.struct_mutex);
+
+       ctx = i915_gem_create_context(i915, file->driver_priv);
+       if (IS_ERR(ctx)) {
+               ret = PTR_ERR(ctx);
+               goto out_unlock;
+       }
+
+       obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+       if (IS_ERR(obj)) {
+               ret = PTR_ERR(obj);
+               goto out_unlock;
+       }
+
+       wakeref = intel_runtime_pm_get(i915);
+
+       /* First set the default mask. */
+       ret = __sseu_test(i915, name, flags, ctx, engine, obj, default_sseu);
+       if (ret)
+               goto out_fail;
+
+       /* Then set a power-gated configuration. */
+       ret = __sseu_test(i915, name, flags, ctx, engine, obj, pg_sseu);
+       if (ret)
+               goto out_fail;
+
+       /* Back to defaults. */
+       ret = __sseu_test(i915, name, flags, ctx, engine, obj, default_sseu);
+       if (ret)
+               goto out_fail;
+
+       /* One last power-gated configuration for the road. */
+       ret = __sseu_test(i915, name, flags, ctx, engine, obj, pg_sseu);
+       if (ret)
+               goto out_fail;
+
+out_fail:
+       if (igt_flush_test(i915, I915_WAIT_LOCKED))
+               ret = -EIO;
+
+       i915_gem_object_put(obj);
+
+       intel_runtime_pm_put(i915, wakeref);
+
+out_unlock:
+       mutex_unlock(&i915->drm.struct_mutex);
+
+       if (flags & TEST_RESET)
+               igt_global_reset_unlock(i915);
+
+       mock_file_free(i915, file);
+
+       if (ret)
+               pr_err("%s: Failed with %d!\n", name, ret);
+
+       return ret;
+}
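The pg_sseu mask construction above is terse enough to deserve a worked value. With, say, eight sub-slices available by default (an assumed example value):

        /*
         * default_sseu.subslice_mask            = 0xff   (8 sub-slices)
         * hweight32(0xff) / 2                   = 4
         * pg_sseu.subslice_mask = ~(~0 << 4)    = 0xf    (low half enabled)
         *
         * together with pg_sseu.slice_mask = 1: one slice, half the
         * sub-slices.
         */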
+
+static int igt_ctx_sseu(void *arg)
+{
+       struct {
+               const char *name;
+               unsigned int flags;
+       } *phase, phases[] = {
+               { .name = "basic", .flags = 0 },
+               { .name = "idle", .flags = TEST_IDLE },
+               { .name = "busy", .flags = TEST_BUSY },
+               { .name = "busy-reset", .flags = TEST_BUSY | TEST_RESET },
+               { .name = "busy-idle", .flags = TEST_BUSY | TEST_IDLE },
+               { .name = "reset-idle", .flags = TEST_RESET | TEST_IDLE },
+       };
+       unsigned int i;
+       int ret = 0;
+
+       for (i = 0, phase = phases; ret == 0 && i < ARRAY_SIZE(phases);
+            i++, phase++)
+               ret = __igt_ctx_sseu(arg, phase->name, phase->flags);
+
+       return ret;
+}
+
 static int igt_ctx_readonly(void *arg)
 {
        struct drm_i915_private *i915 = arg;
@@ -657,11 +1049,11 @@ static int igt_ctx_readonly(void *arg)
        struct i915_gem_context *ctx;
        struct i915_hw_ppgtt *ppgtt;
        unsigned long ndwords, dw;
+       struct igt_live_test t;
        struct drm_file *file;
        I915_RND_STATE(prng);
        IGT_TIMEOUT(end_time);
        LIST_HEAD(objects);
-       struct live_test t;
        int err = -ENODEV;
 
        /*
@@ -676,7 +1068,7 @@ static int igt_ctx_readonly(void *arg)
 
        mutex_lock(&i915->drm.struct_mutex);
 
-       err = begin_live_test(&t, i915, __func__, "");
+       err = igt_live_test_begin(&t, i915, __func__, "");
        if (err)
                goto out_unlock;
 
@@ -699,6 +1091,8 @@ static int igt_ctx_readonly(void *arg)
                unsigned int id;
 
                for_each_engine(engine, i915, id) {
+                       intel_wakeref_t wakeref;
+
                        if (!intel_engine_can_store_dword(engine))
                                continue;
 
@@ -713,9 +1107,9 @@ static int igt_ctx_readonly(void *arg)
                                        i915_gem_object_set_readonly(obj);
                        }
 
-                       intel_runtime_pm_get(i915);
-                       err = gpu_fill(obj, ctx, engine, dw);
-                       intel_runtime_pm_put(i915);
+                       err = 0;
+                       with_intel_runtime_pm(i915, wakeref)
+                               err = gpu_fill(obj, ctx, engine, dw);
                        if (err) {
                                pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
                                       ndwords, dw, max_dwords(obj),
@@ -732,7 +1126,7 @@ static int igt_ctx_readonly(void *arg)
                }
        }
        pr_info("Submitted %lu dwords (across %u engines)\n",
-               ndwords, INTEL_INFO(i915)->num_rings);
+               ndwords, RUNTIME_INFO(i915)->num_rings);
 
        dw = 0;
        list_for_each_entry(obj, &objects, st_link) {
@@ -752,7 +1146,7 @@ static int igt_ctx_readonly(void *arg)
        }
 
 out_unlock:
-       if (end_live_test(&t))
+       if (igt_live_test_end(&t))
                err = -EIO;
        mutex_unlock(&i915->drm.struct_mutex);
 
@@ -976,10 +1370,11 @@ static int igt_vm_isolation(void *arg)
        struct drm_i915_private *i915 = arg;
        struct i915_gem_context *ctx_a, *ctx_b;
        struct intel_engine_cs *engine;
+       intel_wakeref_t wakeref;
+       struct igt_live_test t;
        struct drm_file *file;
        I915_RND_STATE(prng);
        unsigned long count;
-       struct live_test t;
        unsigned int id;
        u64 vm_total;
        int err;
@@ -998,7 +1393,7 @@ static int igt_vm_isolation(void *arg)
 
        mutex_lock(&i915->drm.struct_mutex);
 
-       err = begin_live_test(&t, i915, __func__, "");
+       err = igt_live_test_begin(&t, i915, __func__, "");
        if (err)
                goto out_unlock;
 
@@ -1022,7 +1417,7 @@ static int igt_vm_isolation(void *arg)
        GEM_BUG_ON(ctx_b->ppgtt->vm.total != vm_total);
        vm_total -= I915_GTT_PAGE_SIZE;
 
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
        count = 0;
        for_each_engine(engine, i915, id) {
@@ -1064,12 +1459,12 @@ static int igt_vm_isolation(void *arg)
                count += this;
        }
        pr_info("Checked %lu scratch offsets across %d engines\n",
-               count, INTEL_INFO(i915)->num_rings);
+               count, RUNTIME_INFO(i915)->num_rings);
 
 out_rpm:
-       intel_runtime_pm_put(i915);
+       intel_runtime_pm_put(i915, wakeref);
 out_unlock:
-       if (end_live_test(&t))
+       if (igt_live_test_end(&t))
                err = -EIO;
        mutex_unlock(&i915->drm.struct_mutex);
 
@@ -1165,6 +1560,7 @@ static int igt_switch_to_kernel_context(void *arg)
        struct intel_engine_cs *engine;
        struct i915_gem_context *ctx;
        enum intel_engine_id id;
+       intel_wakeref_t wakeref;
        int err;
 
        /*
@@ -1175,7 +1571,7 @@ static int igt_switch_to_kernel_context(void *arg)
         */
 
        mutex_lock(&i915->drm.struct_mutex);
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
        ctx = kernel_context(i915);
        if (IS_ERR(ctx)) {
@@ -1200,7 +1596,7 @@ out_unlock:
        if (igt_flush_test(i915, I915_WAIT_LOCKED))
                err = -EIO;
 
-       intel_runtime_pm_put(i915);
+       intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
 
        kernel_context_close(ctx);
@@ -1232,6 +1628,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
                SUBTEST(live_nop_switch),
                SUBTEST(igt_ctx_exec),
                SUBTEST(igt_ctx_readonly),
+               SUBTEST(igt_ctx_sseu),
                SUBTEST(igt_vm_isolation),
        };
 
index 4365979d82228fa83c275f8a0f43b0ca6d11df60..32dce7176f6381dc2a0429691dccc2eafc7fe360 100644 (file)
 #include "mock_drm.h"
 #include "mock_gem_device.h"
 
-static int populate_ggtt(struct drm_i915_private *i915)
+static void quirk_add(struct drm_i915_gem_object *obj,
+                     struct list_head *objects)
 {
+       /* mm.quirked is normally only set on live tiled objects; reuse it to declare ownership */
+       GEM_BUG_ON(obj->mm.quirked);
+       obj->mm.quirked = true;
+       list_add(&obj->st_link, objects);
+}
+
+static int populate_ggtt(struct drm_i915_private *i915,
+                        struct list_head *objects)
+{
+       unsigned long unbound, bound, count;
        struct drm_i915_gem_object *obj;
        u64 size;
 
+       count = 0;
        for (size = 0;
             size + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
             size += I915_GTT_PAGE_SIZE) {
@@ -43,21 +55,36 @@ static int populate_ggtt(struct drm_i915_private *i915)
                if (IS_ERR(obj))
                        return PTR_ERR(obj);
 
+               quirk_add(obj, objects);
+
                vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
                if (IS_ERR(vma))
                        return PTR_ERR(vma);
+
+               count++;
        }
 
-       if (!list_empty(&i915->mm.unbound_list)) {
-               size = 0;
-               list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
-                       size++;
+       unbound = 0;
+       list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
+               if (obj->mm.quirked)
+                       unbound++;
+       if (unbound) {
+               pr_err("%s: Found %lu objects unbound, expected %u!\n",
+                      __func__, unbound, 0);
+               return -EINVAL;
+       }
 
-               pr_err("Found %lld objects unbound!\n", size);
+       bound = 0;
+       list_for_each_entry(obj, &i915->mm.bound_list, mm.link)
+               if (obj->mm.quirked)
+                       bound++;
+       if (bound != count) {
+               pr_err("%s: Found %lu objects bound, expected %lu!\n",
+                      __func__, bound, count);
                return -EINVAL;
        }
 
-       if (list_empty(&i915->ggtt.vm.inactive_list)) {
+       if (list_empty(&i915->ggtt.vm.bound_list)) {
                pr_err("No objects on the GGTT inactive list!\n");
                return -EINVAL;
        }
@@ -67,21 +94,26 @@ static int populate_ggtt(struct drm_i915_private *i915)
 
 static void unpin_ggtt(struct drm_i915_private *i915)
 {
+       struct i915_ggtt *ggtt = &i915->ggtt;
        struct i915_vma *vma;
 
-       list_for_each_entry(vma, &i915->ggtt.vm.inactive_list, vm_link)
-               i915_vma_unpin(vma);
+       mutex_lock(&ggtt->vm.mutex);
+       list_for_each_entry(vma, &i915->ggtt.vm.bound_list, vm_link)
+               if (vma->obj->mm.quirked)
+                       i915_vma_unpin(vma);
+       mutex_unlock(&ggtt->vm.mutex);
 }
 
-static void cleanup_objects(struct drm_i915_private *i915)
+static void cleanup_objects(struct drm_i915_private *i915,
+                           struct list_head *list)
 {
        struct drm_i915_gem_object *obj, *on;
 
-       list_for_each_entry_safe(obj, on, &i915->mm.unbound_list, mm.link)
-               i915_gem_object_put(obj);
-
-       list_for_each_entry_safe(obj, on, &i915->mm.bound_list, mm.link)
+       list_for_each_entry_safe(obj, on, list, st_link) {
+               GEM_BUG_ON(!obj->mm.quirked);
+               obj->mm.quirked = false;
                i915_gem_object_put(obj);
+       }
 
        mutex_unlock(&i915->drm.struct_mutex);
 
@@ -94,11 +126,12 @@ static int igt_evict_something(void *arg)
 {
        struct drm_i915_private *i915 = arg;
        struct i915_ggtt *ggtt = &i915->ggtt;
+       LIST_HEAD(objects);
        int err;
 
        /* Fill the GGTT with pinned objects and try to evict one. */
 
-       err = populate_ggtt(i915);
+       err = populate_ggtt(i915, &objects);
        if (err)
                goto cleanup;
 
@@ -127,7 +160,7 @@ static int igt_evict_something(void *arg)
        }
 
 cleanup:
-       cleanup_objects(i915);
+       cleanup_objects(i915, &objects);
        return err;
 }
 
@@ -136,13 +169,14 @@ static int igt_overcommit(void *arg)
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
+       LIST_HEAD(objects);
        int err;
 
        /* Fill the GGTT with pinned objects and then try to pin one more.
         * We expect it to fail.
         */
 
-       err = populate_ggtt(i915);
+       err = populate_ggtt(i915, &objects);
        if (err)
                goto cleanup;
 
@@ -152,6 +186,8 @@ static int igt_overcommit(void *arg)
                goto cleanup;
        }
 
+       quirk_add(obj, &objects);
+
        vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
        if (!IS_ERR(vma) || PTR_ERR(vma) != -ENOSPC) {
                pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n", (int)PTR_ERR(vma));
@@ -160,7 +196,7 @@ static int igt_overcommit(void *arg)
        }
 
 cleanup:
-       cleanup_objects(i915);
+       cleanup_objects(i915, &objects);
        return err;
 }
 
@@ -172,11 +208,12 @@ static int igt_evict_for_vma(void *arg)
                .start = 0,
                .size = 4096,
        };
+       LIST_HEAD(objects);
        int err;
 
        /* Fill the GGTT with pinned objects and try to evict a range. */
 
-       err = populate_ggtt(i915);
+       err = populate_ggtt(i915, &objects);
        if (err)
                goto cleanup;
 
@@ -199,7 +236,7 @@ static int igt_evict_for_vma(void *arg)
        }
 
 cleanup:
-       cleanup_objects(i915);
+       cleanup_objects(i915, &objects);
        return err;
 }
 
@@ -222,6 +259,7 @@ static int igt_evict_for_cache_color(void *arg)
        };
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
+       LIST_HEAD(objects);
        int err;
 
        /* Currently the use of color_adjust is limited to cache domains within
@@ -237,6 +275,7 @@ static int igt_evict_for_cache_color(void *arg)
                goto cleanup;
        }
        i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+       quirk_add(obj, &objects);
 
        vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
                                       I915_GTT_PAGE_SIZE | flags);
@@ -252,6 +291,7 @@ static int igt_evict_for_cache_color(void *arg)
                goto cleanup;
        }
        i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+       quirk_add(obj, &objects);
 
        /* Neighbouring; same colour - should fit */
        vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
@@ -287,7 +327,7 @@ static int igt_evict_for_cache_color(void *arg)
 
 cleanup:
        unpin_ggtt(i915);
-       cleanup_objects(i915);
+       cleanup_objects(i915, &objects);
        ggtt->vm.mm.color_adjust = NULL;
        return err;
 }
@@ -296,11 +336,12 @@ static int igt_evict_vm(void *arg)
 {
        struct drm_i915_private *i915 = arg;
        struct i915_ggtt *ggtt = &i915->ggtt;
+       LIST_HEAD(objects);
        int err;
 
        /* Fill the GGTT with pinned objects and try to evict everything. */
 
-       err = populate_ggtt(i915);
+       err = populate_ggtt(i915, &objects);
        if (err)
                goto cleanup;
 
@@ -322,7 +363,7 @@ static int igt_evict_vm(void *arg)
        }
 
 cleanup:
-       cleanup_objects(i915);
+       cleanup_objects(i915, &objects);
        return err;
 }
 
@@ -336,6 +377,7 @@ static int igt_evict_contexts(void *arg)
                struct drm_mm_node node;
                struct reserved *next;
        } *reserved = NULL;
+       intel_wakeref_t wakeref;
        struct drm_mm_node hole;
        unsigned long count;
        int err;
@@ -355,7 +397,7 @@ static int igt_evict_contexts(void *arg)
                return 0;
 
        mutex_lock(&i915->drm.struct_mutex);
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
        /* Reserve a block so that we know we have enough to fit a few rq */
        memset(&hole, 0, sizeof(hole));
@@ -400,8 +442,10 @@ static int igt_evict_contexts(void *arg)
                struct drm_file *file;
 
                file = mock_file(i915);
-               if (IS_ERR(file))
-                       return PTR_ERR(file);
+               if (IS_ERR(file)) {
+                       err = PTR_ERR(file);
+                       break;
+               }
 
                count = 0;
                mutex_lock(&i915->drm.struct_mutex);
@@ -464,7 +508,7 @@ out_locked:
        }
        if (drm_mm_node_allocated(&hole))
                drm_mm_remove_node(&hole);
-       intel_runtime_pm_put(i915);
+       intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
 
        return err;
@@ -480,14 +524,17 @@ int i915_gem_evict_mock_selftests(void)
                SUBTEST(igt_overcommit),
        };
        struct drm_i915_private *i915;
-       int err;
+       intel_wakeref_t wakeref;
+       int err = 0;
 
        i915 = mock_gem_device();
        if (!i915)
                return -ENOMEM;
 
        mutex_lock(&i915->drm.struct_mutex);
-       err = i915_subtests(tests, i915);
+       with_intel_runtime_pm(i915, wakeref)
+               err = i915_subtests(tests, i915);
+
        mutex_unlock(&i915->drm.struct_mutex);
 
        drm_dev_put(&i915->drm);
index a9ed0ecc94e2d77fa23a721fb9ace38655851597..3850ef4a5ec89240883c5fcb074acd4ab78be235 100644 (file)
@@ -275,6 +275,7 @@ static int lowlevel_hole(struct drm_i915_private *i915,
 
                for (n = 0; n < count; n++) {
                        u64 addr = hole_start + order[n] * BIT_ULL(size);
+                       intel_wakeref_t wakeref;
 
                        GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
 
@@ -293,9 +294,9 @@ static int lowlevel_hole(struct drm_i915_private *i915,
                        mock_vma.node.size = BIT_ULL(size);
                        mock_vma.node.start = addr;
 
-                       intel_runtime_pm_get(i915);
+                       wakeref = intel_runtime_pm_get(i915);
                        vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
-                       intel_runtime_pm_put(i915);
+                       intel_runtime_pm_put(i915, wakeref);
                }
                count = n;
 
@@ -1144,6 +1145,7 @@ static int igt_ggtt_page(void *arg)
        struct drm_i915_private *i915 = arg;
        struct i915_ggtt *ggtt = &i915->ggtt;
        struct drm_i915_gem_object *obj;
+       intel_wakeref_t wakeref;
        struct drm_mm_node tmp;
        unsigned int *order, n;
        int err;
@@ -1169,7 +1171,7 @@ static int igt_ggtt_page(void *arg)
        if (err)
                goto out_unpin;
 
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
        for (n = 0; n < count; n++) {
                u64 offset = tmp.start + n * PAGE_SIZE;
@@ -1216,7 +1218,7 @@ static int igt_ggtt_page(void *arg)
        kfree(order);
 out_remove:
        ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
-       intel_runtime_pm_put(i915);
+       intel_runtime_pm_put(i915, wakeref);
        drm_mm_remove_node(&tmp);
 out_unpin:
        i915_gem_object_unpin_pages(obj);
@@ -1235,7 +1237,10 @@ static void track_vma_bind(struct i915_vma *vma)
        __i915_gem_object_pin_pages(obj);
 
        vma->pages = obj->mm.pages;
-       list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+
+       mutex_lock(&vma->vm->mutex);
+       list_move_tail(&vma->vm_link, &vma->vm->bound_list);
+       mutex_unlock(&vma->vm->mutex);
 }
 
 static int exercise_mock(struct drm_i915_private *i915,
@@ -1265,27 +1270,35 @@ static int exercise_mock(struct drm_i915_private *i915,
 
 static int igt_mock_fill(void *arg)
 {
-       return exercise_mock(arg, fill_hole);
+       struct i915_ggtt *ggtt = arg;
+
+       return exercise_mock(ggtt->vm.i915, fill_hole);
 }
 
 static int igt_mock_walk(void *arg)
 {
-       return exercise_mock(arg, walk_hole);
+       struct i915_ggtt *ggtt = arg;
+
+       return exercise_mock(ggtt->vm.i915, walk_hole);
 }
 
 static int igt_mock_pot(void *arg)
 {
-       return exercise_mock(arg, pot_hole);
+       struct i915_ggtt *ggtt = arg;
+
+       return exercise_mock(ggtt->vm.i915, pot_hole);
 }
 
 static int igt_mock_drunk(void *arg)
 {
-       return exercise_mock(arg, drunk_hole);
+       struct i915_ggtt *ggtt = arg;
+
+       return exercise_mock(ggtt->vm.i915, drunk_hole);
 }
 
 static int igt_gtt_reserve(void *arg)
 {
-       struct drm_i915_private *i915 = arg;
+       struct i915_ggtt *ggtt = arg;
        struct drm_i915_gem_object *obj, *on;
        LIST_HEAD(objects);
        u64 total;
@@ -1298,11 +1311,12 @@ static int igt_gtt_reserve(void *arg)
 
        /* Start by filling the GGTT */
        for (total = 0;
-            total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
-            total += 2*I915_GTT_PAGE_SIZE) {
+            total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
+            total += 2 * I915_GTT_PAGE_SIZE) {
                struct i915_vma *vma;
 
-               obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
+               obj = i915_gem_object_create_internal(ggtt->vm.i915,
+                                                     2 * PAGE_SIZE);
                if (IS_ERR(obj)) {
                        err = PTR_ERR(obj);
                        goto out;
@@ -1316,20 +1330,20 @@ static int igt_gtt_reserve(void *arg)
 
                list_add(&obj->st_link, &objects);
 
-               vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+               vma = i915_vma_instance(obj, &ggtt->vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto out;
                }
 
-               err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
+               err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
                                           obj->base.size,
                                           total,
                                           obj->cache_level,
                                           0);
                if (err) {
                        pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
-                              total, i915->ggtt.vm.total, err);
+                              total, ggtt->vm.total, err);
                        goto out;
                }
                track_vma_bind(vma);
@@ -1347,11 +1361,12 @@ static int igt_gtt_reserve(void *arg)
 
        /* Now we start forcing evictions */
        for (total = I915_GTT_PAGE_SIZE;
-            total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
-            total += 2*I915_GTT_PAGE_SIZE) {
+            total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
+            total += 2 * I915_GTT_PAGE_SIZE) {
                struct i915_vma *vma;
 
-               obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
+               obj = i915_gem_object_create_internal(ggtt->vm.i915,
+                                                     2 * PAGE_SIZE);
                if (IS_ERR(obj)) {
                        err = PTR_ERR(obj);
                        goto out;
@@ -1365,20 +1380,20 @@ static int igt_gtt_reserve(void *arg)
 
                list_add(&obj->st_link, &objects);
 
-               vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+               vma = i915_vma_instance(obj, &ggtt->vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto out;
                }
 
-               err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
+               err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
                                           obj->base.size,
                                           total,
                                           obj->cache_level,
                                           0);
                if (err) {
                        pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
-                              total, i915->ggtt.vm.total, err);
+                              total, ggtt->vm.total, err);
                        goto out;
                }
                track_vma_bind(vma);
@@ -1399,7 +1414,7 @@ static int igt_gtt_reserve(void *arg)
                struct i915_vma *vma;
                u64 offset;
 
-               vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+               vma = i915_vma_instance(obj, &ggtt->vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto out;
@@ -1411,18 +1426,18 @@ static int igt_gtt_reserve(void *arg)
                        goto out;
                }
 
-               offset = random_offset(0, i915->ggtt.vm.total,
+               offset = random_offset(0, ggtt->vm.total,
                                       2*I915_GTT_PAGE_SIZE,
                                       I915_GTT_MIN_ALIGNMENT);
 
-               err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
+               err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
                                           obj->base.size,
                                           offset,
                                           obj->cache_level,
                                           0);
                if (err) {
                        pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
-                              total, i915->ggtt.vm.total, err);
+                              total, ggtt->vm.total, err);
                        goto out;
                }
                track_vma_bind(vma);
@@ -1448,7 +1463,7 @@ out:
 
 static int igt_gtt_insert(void *arg)
 {
-       struct drm_i915_private *i915 = arg;
+       struct i915_ggtt *ggtt = arg;
        struct drm_i915_gem_object *obj, *on;
        struct drm_mm_node tmp = {};
        const struct invalid_insert {
@@ -1457,8 +1472,8 @@ static int igt_gtt_insert(void *arg)
                u64 start, end;
        } invalid_insert[] = {
                {
-                       i915->ggtt.vm.total + I915_GTT_PAGE_SIZE, 0,
-                       0, i915->ggtt.vm.total,
+                       ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
+                       0, ggtt->vm.total,
                },
                {
                        2*I915_GTT_PAGE_SIZE, 0,
@@ -1488,7 +1503,7 @@ static int igt_gtt_insert(void *arg)
 
        /* Check a couple of obviously invalid requests */
        for (ii = invalid_insert; ii->size; ii++) {
-               err = i915_gem_gtt_insert(&i915->ggtt.vm, &tmp,
+               err = i915_gem_gtt_insert(&ggtt->vm, &tmp,
                                          ii->size, ii->alignment,
                                          I915_COLOR_UNEVICTABLE,
                                          ii->start, ii->end,
@@ -1503,11 +1518,12 @@ static int igt_gtt_insert(void *arg)
 
        /* Start by filling the GGTT */
        for (total = 0;
-            total + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
+            total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
             total += I915_GTT_PAGE_SIZE) {
                struct i915_vma *vma;
 
-               obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+               obj = i915_gem_object_create_internal(ggtt->vm.i915,
+                                                     I915_GTT_PAGE_SIZE);
                if (IS_ERR(obj)) {
                        err = PTR_ERR(obj);
                        goto out;
@@ -1521,15 +1537,15 @@ static int igt_gtt_insert(void *arg)
 
                list_add(&obj->st_link, &objects);
 
-               vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+               vma = i915_vma_instance(obj, &ggtt->vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto out;
                }
 
-               err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
+               err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
                                          obj->base.size, 0, obj->cache_level,
-                                         0, i915->ggtt.vm.total,
+                                         0, ggtt->vm.total,
                                          0);
                if (err == -ENOSPC) {
                        /* maxed out the GGTT space */
@@ -1538,7 +1554,7 @@ static int igt_gtt_insert(void *arg)
                }
                if (err) {
                        pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
-                              total, i915->ggtt.vm.total, err);
+                              total, ggtt->vm.total, err);
                        goto out;
                }
                track_vma_bind(vma);
@@ -1550,7 +1566,7 @@ static int igt_gtt_insert(void *arg)
        list_for_each_entry(obj, &objects, st_link) {
                struct i915_vma *vma;
 
-               vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+               vma = i915_vma_instance(obj, &ggtt->vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto out;
@@ -1570,7 +1586,7 @@ static int igt_gtt_insert(void *arg)
                struct i915_vma *vma;
                u64 offset;
 
-               vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+               vma = i915_vma_instance(obj, &ggtt->vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto out;
@@ -1585,13 +1601,13 @@ static int igt_gtt_insert(void *arg)
                        goto out;
                }
 
-               err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
+               err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
                                          obj->base.size, 0, obj->cache_level,
-                                         0, i915->ggtt.vm.total,
+                                         0, ggtt->vm.total,
                                          0);
                if (err) {
                        pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
-                              total, i915->ggtt.vm.total, err);
+                              total, ggtt->vm.total, err);
                        goto out;
                }
                track_vma_bind(vma);
@@ -1607,11 +1623,12 @@ static int igt_gtt_insert(void *arg)
 
        /* And then force evictions */
        for (total = 0;
-            total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
-            total += 2*I915_GTT_PAGE_SIZE) {
+            total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
+            total += 2 * I915_GTT_PAGE_SIZE) {
                struct i915_vma *vma;
 
-               obj = i915_gem_object_create_internal(i915, 2*I915_GTT_PAGE_SIZE);
+               obj = i915_gem_object_create_internal(ggtt->vm.i915,
+                                                     2 * I915_GTT_PAGE_SIZE);
                if (IS_ERR(obj)) {
                        err = PTR_ERR(obj);
                        goto out;
@@ -1625,19 +1642,19 @@ static int igt_gtt_insert(void *arg)
 
                list_add(&obj->st_link, &objects);
 
-               vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+               vma = i915_vma_instance(obj, &ggtt->vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto out;
                }
 
-               err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
+               err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
                                          obj->base.size, 0, obj->cache_level,
-                                         0, i915->ggtt.vm.total,
+                                         0, ggtt->vm.total,
                                          0);
                if (err) {
                        pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
-                              total, i915->ggtt.vm.total, err);
+                              total, ggtt->vm.total, err);
                        goto out;
                }
                track_vma_bind(vma);
@@ -1664,17 +1681,25 @@ int i915_gem_gtt_mock_selftests(void)
                SUBTEST(igt_gtt_insert),
        };
        struct drm_i915_private *i915;
+       struct i915_ggtt ggtt;
        int err;
 
        i915 = mock_gem_device();
        if (!i915)
                return -ENOMEM;
 
+       mock_init_ggtt(i915, &ggtt);
+
        mutex_lock(&i915->drm.struct_mutex);
-       err = i915_subtests(tests, i915);
+       err = i915_subtests(tests, &ggtt);
+       mock_device_flush(i915);
        mutex_unlock(&i915->drm.struct_mutex);
 
+       i915_gem_drain_freed_objects(i915);
+
+       mock_fini_ggtt(&ggtt);
        drm_dev_put(&i915->drm);
+
        return err;
 }
 
index c3999dd2021e4b76816a50ab0df6396205df5e2b..395ae878e0f7c3529ede2201b3b388a0a46dc785 100644 (file)
@@ -238,6 +238,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
                u32 *cpu;
 
                GEM_BUG_ON(view.partial.size > nreal);
+               cond_resched();
 
                err = i915_gem_object_set_to_gtt_domain(obj, true);
                if (err) {
@@ -307,6 +308,7 @@ static int igt_partial_tiling(void *arg)
        const unsigned int nreal = 1 << 12; /* largest tile row x2 */
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_object *obj;
+       intel_wakeref_t wakeref;
        int tiling;
        int err;
 
@@ -332,7 +334,7 @@ static int igt_partial_tiling(void *arg)
        }
 
        mutex_lock(&i915->drm.struct_mutex);
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
        if (1) {
                IGT_TIMEOUT(end);
@@ -443,7 +445,7 @@ next_tiling: ;
        }
 
 out_unlock:
-       intel_runtime_pm_put(i915);
+       intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        i915_gem_object_unpin_pages(obj);
 out:
@@ -505,11 +507,13 @@ static void disable_retire_worker(struct drm_i915_private *i915)
 
        mutex_lock(&i915->drm.struct_mutex);
        if (!i915->gt.active_requests++) {
-               intel_runtime_pm_get(i915);
-               i915_gem_unpark(i915);
-               intel_runtime_pm_put(i915);
+               intel_wakeref_t wakeref;
+
+               with_intel_runtime_pm(i915, wakeref)
+                       i915_gem_unpark(i915);
        }
        mutex_unlock(&i915->drm.struct_mutex);
+
        cancel_delayed_work_sync(&i915->gt.retire_work);
        cancel_delayed_work_sync(&i915->gt.idle_work);
 }
@@ -577,6 +581,8 @@ static int igt_mmap_offset_exhaustion(void *arg)
 
        /* Now fill with busy dead objects that we expect to reap */
        for (loop = 0; loop < 3; loop++) {
+               intel_wakeref_t wakeref;
+
                if (i915_terminally_wedged(&i915->gpu_error))
                        break;
 
@@ -586,10 +592,10 @@ static int igt_mmap_offset_exhaustion(void *arg)
                        goto out;
                }
 
+               err = 0;
                mutex_lock(&i915->drm.struct_mutex);
-               intel_runtime_pm_get(i915);
-               err = make_obj_busy(obj);
-               intel_runtime_pm_put(i915);
+               with_intel_runtime_pm(i915, wakeref)
+                       err = make_obj_busy(obj);
                mutex_unlock(&i915->drm.struct_mutex);
                if (err) {
                        pr_err("[loop %d] Failed to busy the object\n", loop);
index a15713cae3b3a9875a76a130599dafb1d4205ace..6d766925ad045f5ec46167bff68d08df7fe9a9ec 100644 (file)
@@ -12,7 +12,9 @@
 selftest(sanitycheck, i915_live_sanitycheck) /* keep first (igt selfcheck) */
 selftest(uncore, intel_uncore_live_selftests)
 selftest(workarounds, intel_workarounds_live_selftests)
+selftest(timelines, i915_timeline_live_selftests)
 selftest(requests, i915_request_live_selftests)
+selftest(active, i915_active_live_selftests)
 selftest(objects, i915_gem_object_live_selftests)
 selftest(dmabuf, i915_gem_dmabuf_live_selftests)
 selftest(coherency, i915_gem_coherency_live_selftests)
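These selftest headers are x-macro lists: i915_selftest.c defines selftest() and re-includes the header to generate both an enum and the dispatch table. Schematically (a simplified sketch of the mechanism, not the exact kernel code):

        enum {
        #define selftest(name, func) live_##name,
        #include "i915_live_selftests.h"
        #undef selftest
        };

        #define selftest(n, f) [live_##n] = { .name = #n, .live = f },
        static struct selftest live_selftests[] = {
        #include "i915_live_selftests.h"
        };
        #undef selftest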
index 1b70208eeea7a80fc7f9a14693f11de9bf3c0b1d..88e5ab586337cfdef4fb40c693b74ad3c76f6a9c 100644 (file)
@@ -15,8 +15,7 @@ selftest(scatterlist, scatterlist_mock_selftests)
 selftest(syncmap, i915_syncmap_mock_selftests)
 selftest(uncore, intel_uncore_mock_selftests)
 selftest(engine, intel_engine_cs_mock_selftests)
-selftest(breadcrumbs, intel_breadcrumbs_mock_selftests)
-selftest(timelines, i915_gem_timeline_mock_selftests)
+selftest(timelines, i915_timeline_mock_selftests)
 selftest(requests, i915_request_mock_selftests)
 selftest(objects, i915_gem_object_mock_selftests)
 selftest(dmabuf, i915_gem_dmabuf_mock_selftests)
index 1f415ce4701831758935465a330e11eb2cf9dfc9..716a3f19f03035fe2cf6fd4c126ea8ca14092d8f 100644 (file)
@@ -41,18 +41,37 @@ u64 i915_prandom_u64_state(struct rnd_state *rnd)
        return x;
 }
 
-void i915_random_reorder(unsigned int *order, unsigned int count,
-                        struct rnd_state *state)
+void i915_prandom_shuffle(void *arr, size_t elsz, size_t count,
+                         struct rnd_state *state)
 {
-       unsigned int i, j;
+       char stack[128];
+
+       if (WARN_ON(elsz > sizeof(stack) || count > U32_MAX))
+               return;
+
+       if (!elsz || !count)
+               return;
+
+       /* Fisher-Yates shuffle courtesy of Knuth */
+       while (--count) {
+               size_t swp;
+
+               swp = i915_prandom_u32_max_state(count + 1, state);
+               if (swp == count)
+                       continue;
 
-       for (i = 0; i < count; i++) {
-               BUILD_BUG_ON(sizeof(unsigned int) > sizeof(u32));
-               j = i915_prandom_u32_max_state(count, state);
-               swap(order[i], order[j]);
+               memcpy(stack, arr + count * elsz, elsz);
+               memcpy(arr + count * elsz, arr + swp * elsz, elsz);
+               memcpy(arr + swp * elsz, stack, elsz);
        }
 }
 
+void i915_random_reorder(unsigned int *order, unsigned int count,
+                        struct rnd_state *state)
+{
+       i915_prandom_shuffle(order, sizeof(*order), count, state);
+}
+
 unsigned int *i915_random_order(unsigned int count, struct rnd_state *state)
 {
        unsigned int *order, i;
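i915_prandom_shuffle() generalizes the old integer-only reorder to arbitrary element sizes by bouncing each swap through a small stack buffer. For reference, the same Fisher-Yates walk over plain integers in userspace C, with rand() standing in for the driver's seeded rnd_state (illustrative only; rand() % n also has a slight modulo bias that i915_prandom_u32_max_state() avoids):

    #include <stdlib.h>

    /*
     * Fisher-Yates: swap each trailing slot with a randomly chosen
     * earlier one (possibly itself), yielding a uniform permutation.
     */
    static void shuffle_u32(unsigned int *arr, size_t count)
    {
            if (!count)
                    return;

            while (--count) {
                    size_t swp = (size_t)rand() % (count + 1);
                    unsigned int tmp;

                    tmp = arr[count];
                    arr[count] = arr[swp];
                    arr[swp] = tmp;
            }
    }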
index 7dffedc501cadaf1294dbe1ca4515a5db62c1cdb..8e1ff9c105b66a1ad74f99f08cdae9544987bfa8 100644 (file)
@@ -54,4 +54,7 @@ void i915_random_reorder(unsigned int *order,
                         unsigned int count,
                         struct rnd_state *state);
 
+void i915_prandom_shuffle(void *arr, size_t elsz, size_t count,
+                         struct rnd_state *state);
+
 #endif /* !__I915_SELFTESTS_RANDOM_H__ */
index 07e5578153088cbdb434b58afae4ae1f1ddccc08..6733dc5b6b4c84e19f89ca0ea24be967dc69a625 100644 (file)
 #include <linux/prime_numbers.h>
 
 #include "../i915_selftest.h"
+#include "i915_random.h"
+#include "igt_live_test.h"
+#include "lib_sw_fence.h"
 
 #include "mock_context.h"
+#include "mock_drm.h"
 #include "mock_gem_device.h"
 
 static int igt_add_request(void *arg)
@@ -246,93 +250,285 @@ err_context_0:
        return err;
 }
 
-int i915_request_mock_selftests(void)
+struct smoketest {
+       struct intel_engine_cs *engine;
+       struct i915_gem_context **contexts;
+       atomic_long_t num_waits, num_fences;
+       int ncontexts, max_batch;
+       struct i915_request *(*request_alloc)(struct i915_gem_context *,
+                                             struct intel_engine_cs *);
+};
+
+static struct i915_request *
+__mock_request_alloc(struct i915_gem_context *ctx,
+                    struct intel_engine_cs *engine)
 {
-       static const struct i915_subtest tests[] = {
-               SUBTEST(igt_add_request),
-               SUBTEST(igt_wait_request),
-               SUBTEST(igt_fence_wait),
-               SUBTEST(igt_request_rewind),
-       };
-       struct drm_i915_private *i915;
-       int err;
+       return mock_request(engine, ctx, 0);
+}
 
-       i915 = mock_gem_device();
-       if (!i915)
+static struct i915_request *
+__live_request_alloc(struct i915_gem_context *ctx,
+                    struct intel_engine_cs *engine)
+{
+       return i915_request_alloc(engine, ctx);
+}
+
+static int __igt_breadcrumbs_smoketest(void *arg)
+{
+       struct smoketest *t = arg;
+       struct mutex * const BKL = &t->engine->i915->drm.struct_mutex;
+       const unsigned int max_batch = min(t->ncontexts, t->max_batch) - 1;
+       const unsigned int total = 4 * t->ncontexts + 1;
+       unsigned int num_waits = 0, num_fences = 0;
+       struct i915_request **requests;
+       I915_RND_STATE(prng);
+       unsigned int *order;
+       int err = 0;
+
+       /*
+        * A very simple test to catch the most egregious of list handling bugs.
+        *
+        * At its heart, we simply create oodles of requests running across
+        * multiple kthreads and enable signaling on them, for the sole purpose
+        * of stressing our breadcrumb handling. The only check we make is
+        * that the fences were marked as signaled.
+        */
+
+       requests = kmalloc_array(total, sizeof(*requests), GFP_KERNEL);
+       if (!requests)
                return -ENOMEM;
 
-       err = i915_subtests(tests, i915);
-       drm_dev_put(&i915->drm);
+       order = i915_random_order(total, &prng);
+       if (!order) {
+               err = -ENOMEM;
+               goto out_requests;
+       }
 
-       return err;
-}
+       while (!kthread_should_stop()) {
+               struct i915_sw_fence *submit, *wait;
+               unsigned int n, count;
 
-struct live_test {
-       struct drm_i915_private *i915;
-       const char *func;
-       const char *name;
+               submit = heap_fence_create(GFP_KERNEL);
+               if (!submit) {
+                       err = -ENOMEM;
+                       break;
+               }
 
-       unsigned int reset_count;
-};
+               wait = heap_fence_create(GFP_KERNEL);
+               if (!wait) {
+                       i915_sw_fence_commit(submit);
+                       heap_fence_put(submit);
+                       err = -ENOMEM;
+                       break;
+               }
 
-static int begin_live_test(struct live_test *t,
-                          struct drm_i915_private *i915,
-                          const char *func,
-                          const char *name)
-{
-       int err;
+               i915_random_reorder(order, total, &prng);
+               count = 1 + i915_prandom_u32_max_state(max_batch, &prng);
 
-       t->i915 = i915;
-       t->func = func;
-       t->name = name;
+               for (n = 0; n < count; n++) {
+                       struct i915_gem_context *ctx =
+                               t->contexts[order[n] % t->ncontexts];
+                       struct i915_request *rq;
 
-       err = i915_gem_wait_for_idle(i915,
-                                    I915_WAIT_LOCKED,
-                                    MAX_SCHEDULE_TIMEOUT);
-       if (err) {
-               pr_err("%s(%s): failed to idle before, with err=%d!",
-                      func, name, err);
-               return err;
+                       mutex_lock(BKL);
+
+                       rq = t->request_alloc(ctx, t->engine);
+                       if (IS_ERR(rq)) {
+                               mutex_unlock(BKL);
+                               err = PTR_ERR(rq);
+                               count = n;
+                               break;
+                       }
+
+                       err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
+                                                              submit,
+                                                              GFP_KERNEL);
+
+                       requests[n] = i915_request_get(rq);
+                       i915_request_add(rq);
+
+                       mutex_unlock(BKL);
+
+                       if (err >= 0)
+                               err = i915_sw_fence_await_dma_fence(wait,
+                                                                   &rq->fence,
+                                                                   0,
+                                                                   GFP_KERNEL);
+
+                       if (err < 0) {
+                               i915_request_put(rq);
+                               count = n;
+                               break;
+                       }
+               }
+
+               i915_sw_fence_commit(submit);
+               i915_sw_fence_commit(wait);
+
+               if (!wait_event_timeout(wait->wait,
+                                       i915_sw_fence_done(wait),
+                                       HZ / 2)) {
+                       struct i915_request *rq = requests[count - 1];
+
+                       pr_err("waiting for %d fences (last %llx:%lld) on %s timed out!\n",
+                              count,
+                              rq->fence.context, rq->fence.seqno,
+                              t->engine->name);
+                       i915_gem_set_wedged(t->engine->i915);
+                       GEM_BUG_ON(!i915_request_completed(rq));
+                       i915_sw_fence_wait(wait);
+                       err = -EIO;
+               }
+
+               for (n = 0; n < count; n++) {
+                       struct i915_request *rq = requests[n];
+
+                       if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+                                     &rq->fence.flags)) {
+                               pr_err("%llu:%llu was not signaled!\n",
+                                      rq->fence.context, rq->fence.seqno);
+                               err = -EINVAL;
+                       }
+
+                       i915_request_put(rq);
+               }
+
+               heap_fence_put(wait);
+               heap_fence_put(submit);
+
+               if (err < 0)
+                       break;
+
+               num_fences += count;
+               num_waits++;
+
+               cond_resched();
        }
 
-       i915->gpu_error.missed_irq_rings = 0;
-       t->reset_count = i915_reset_count(&i915->gpu_error);
+       atomic_long_add(num_fences, &t->num_fences);
+       atomic_long_add(num_waits, &t->num_waits);
 
-       return 0;
+       kfree(order);
+out_requests:
+       kfree(requests);
+       return err;
 }
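Each pass of the loop above knots the requests into a small fence graph: every request's submission is gated behind the submit barrier, and the wait barrier is extended until each request's dma-fence has signaled. Schematically (a sketch of the ordering, not code from the patch):

    /*
     *   submit --> rq[0].submit --\
     *   submit --> rq[1].submit ---+-- execute --> rq[*].fence --> wait
     *   submit --> rq[n].submit --/
     *
     * Committing "submit" releases the whole batch at once, so the
     * breadcrumb machinery sees a burst of back-to-back signals; the
     * test then only verifies that every fence was marked as signaled.
     */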
 
-static int end_live_test(struct live_test *t)
+static int mock_breadcrumbs_smoketest(void *arg)
 {
-       struct drm_i915_private *i915 = t->i915;
+       struct drm_i915_private *i915 = arg;
+       struct smoketest t = {
+               .engine = i915->engine[RCS],
+               .ncontexts = 1024,
+               .max_batch = 1024,
+               .request_alloc = __mock_request_alloc
+       };
+       unsigned int ncpus = num_online_cpus();
+       struct task_struct **threads;
+       unsigned int n;
+       int ret = 0;
+
+       /*
+        * Smoketest our breadcrumb/signal handling for requests across multiple
+        * threads. A very simple test to catch only the most egregious of bugs.
+        * See __igt_breadcrumbs_smoketest().
+        */
 
-       i915_retire_requests(i915);
+       threads = kmalloc_array(ncpus, sizeof(*threads), GFP_KERNEL);
+       if (!threads)
+               return -ENOMEM;
 
-       if (wait_for(intel_engines_are_idle(i915), 10)) {
-               pr_err("%s(%s): GPU not idle\n", t->func, t->name);
-               return -EIO;
+       t.contexts =
+               kmalloc_array(t.ncontexts, sizeof(*t.contexts), GFP_KERNEL);
+       if (!t.contexts) {
+               ret = -ENOMEM;
+               goto out_threads;
        }
 
-       if (t->reset_count != i915_reset_count(&i915->gpu_error)) {
-               pr_err("%s(%s): GPU was reset %d times!\n",
-                      t->func, t->name,
-                      i915_reset_count(&i915->gpu_error) - t->reset_count);
-               return -EIO;
+       mutex_lock(&t.engine->i915->drm.struct_mutex);
+       for (n = 0; n < t.ncontexts; n++) {
+               t.contexts[n] = mock_context(t.engine->i915, "mock");
+               if (!t.contexts[n]) {
+                       ret = -ENOMEM;
+                       goto out_contexts;
+               }
        }
+       mutex_unlock(&t.engine->i915->drm.struct_mutex);
+
+       for (n = 0; n < ncpus; n++) {
+               threads[n] = kthread_run(__igt_breadcrumbs_smoketest,
+                                        &t, "igt/%d", n);
+               if (IS_ERR(threads[n])) {
+                       ret = PTR_ERR(threads[n]);
+                       ncpus = n;
+                       break;
+               }
 
-       if (i915->gpu_error.missed_irq_rings) {
-               pr_err("%s(%s): Missed interrupts on engines %lx\n",
-                      t->func, t->name, i915->gpu_error.missed_irq_rings);
-               return -EIO;
+               get_task_struct(threads[n]);
        }
 
-       return 0;
+       msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
+
+       for (n = 0; n < ncpus; n++) {
+               int err;
+
+               err = kthread_stop(threads[n]);
+               if (err < 0 && !ret)
+                       ret = err;
+
+               put_task_struct(threads[n]);
+       }
+       pr_info("Completed %lu waits for %lu fence across %d cpus\n",
+               atomic_long_read(&t.num_waits),
+               atomic_long_read(&t.num_fences),
+               ncpus);
+
+       mutex_lock(&t.engine->i915->drm.struct_mutex);
+out_contexts:
+       for (n = 0; n < t.ncontexts; n++) {
+               if (!t.contexts[n])
+                       break;
+               mock_context_close(t.contexts[n]);
+       }
+       mutex_unlock(&t.engine->i915->drm.struct_mutex);
+       kfree(t.contexts);
+out_threads:
+       kfree(threads);
+
+       return ret;
+}
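The spawn/run/reap shape above (one kthread per online CPU, run until the selftest timeout, then kthread_stop() to collect each worker's exit code) is reused by the live variant further down. A condensed sketch, with run_workers() as a hypothetical helper name:

    #include <linux/err.h>
    #include <linux/delay.h>
    #include <linux/kthread.h>
    #include <linux/slab.h>

    /* Sketch: run fn on ncpus kthreads for timeout_ms, reap the first error. */
    static int run_workers(int (*fn)(void *), void *data,
                           unsigned int ncpus, unsigned int timeout_ms)
    {
            struct task_struct **tsk;
            unsigned int n;
            int err = 0;

            tsk = kcalloc(ncpus, sizeof(*tsk), GFP_KERNEL);
            if (!tsk)
                    return -ENOMEM;

            for (n = 0; n < ncpus; n++) {
                    tsk[n] = kthread_run(fn, data, "igt/%d", n);
                    if (IS_ERR(tsk[n])) {
                            err = PTR_ERR(tsk[n]);
                            ncpus = n; /* reap only what was started */
                            break;
                    }

                    /* Pin the task so kthread_stop() remains safe even
                     * if the worker has already returned.
                     */
                    get_task_struct(tsk[n]);
            }

            if (!err)
                    msleep(timeout_ms);

            for (n = 0; n < ncpus; n++) {
                    int ret = kthread_stop(tsk[n]); /* worker's exit code */

                    if (ret < 0 && !err)
                            err = ret;
                    put_task_struct(tsk[n]);
            }

            kfree(tsk);
            return err;
    }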
+
+int i915_request_mock_selftests(void)
+{
+       static const struct i915_subtest tests[] = {
+               SUBTEST(igt_add_request),
+               SUBTEST(igt_wait_request),
+               SUBTEST(igt_fence_wait),
+               SUBTEST(igt_request_rewind),
+               SUBTEST(mock_breadcrumbs_smoketest),
+       };
+       struct drm_i915_private *i915;
+       intel_wakeref_t wakeref;
+       int err = 0;
+
+       i915 = mock_gem_device();
+       if (!i915)
+               return -ENOMEM;
+
+       with_intel_runtime_pm(i915, wakeref)
+               err = i915_subtests(tests, i915);
+
+       drm_dev_put(&i915->drm);
+
+       return err;
 }
 
 static int live_nop_request(void *arg)
 {
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
-       struct live_test t;
+       intel_wakeref_t wakeref;
+       struct igt_live_test t;
        unsigned int id;
        int err = -ENODEV;
 
@@ -342,7 +538,7 @@ static int live_nop_request(void *arg)
         */
 
        mutex_lock(&i915->drm.struct_mutex);
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
        for_each_engine(engine, i915, id) {
                struct i915_request *request = NULL;
@@ -350,7 +546,7 @@ static int live_nop_request(void *arg)
                IGT_TIMEOUT(end_time);
                ktime_t times[2] = {};
 
-               err = begin_live_test(&t, i915, __func__, engine->name);
+               err = igt_live_test_begin(&t, i915, __func__, engine->name);
                if (err)
                        goto out_unlock;
 
@@ -392,7 +588,7 @@ static int live_nop_request(void *arg)
                                break;
                }
 
-               err = end_live_test(&t);
+               err = igt_live_test_end(&t);
                if (err)
                        goto out_unlock;
 
@@ -403,7 +599,7 @@ static int live_nop_request(void *arg)
        }
 
 out_unlock:
-       intel_runtime_pm_put(i915);
+       intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
 }
@@ -478,7 +674,8 @@ static int live_empty_request(void *arg)
 {
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
-       struct live_test t;
+       intel_wakeref_t wakeref;
+       struct igt_live_test t;
        struct i915_vma *batch;
        unsigned int id;
        int err = 0;
@@ -489,7 +686,7 @@ static int live_empty_request(void *arg)
         */
 
        mutex_lock(&i915->drm.struct_mutex);
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
        batch = empty_batch(i915);
        if (IS_ERR(batch)) {
@@ -503,7 +700,7 @@ static int live_empty_request(void *arg)
                unsigned long n, prime;
                ktime_t times[2] = {};
 
-               err = begin_live_test(&t, i915, __func__, engine->name);
+               err = igt_live_test_begin(&t, i915, __func__, engine->name);
                if (err)
                        goto out_batch;
 
@@ -539,7 +736,7 @@ static int live_empty_request(void *arg)
                                break;
                }
 
-               err = end_live_test(&t);
+               err = igt_live_test_end(&t);
                if (err)
                        goto out_batch;
 
@@ -553,7 +750,7 @@ out_batch:
        i915_vma_unpin(batch);
        i915_vma_put(batch);
 out_unlock:
-       intel_runtime_pm_put(i915);
+       intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
 }
@@ -637,8 +834,9 @@ static int live_all_engines(void *arg)
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        struct i915_request *request[I915_NUM_ENGINES];
+       intel_wakeref_t wakeref;
+       struct igt_live_test t;
        struct i915_vma *batch;
-       struct live_test t;
        unsigned int id;
        int err;
 
@@ -648,9 +846,9 @@ static int live_all_engines(void *arg)
         */
 
        mutex_lock(&i915->drm.struct_mutex);
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
-       err = begin_live_test(&t, i915, __func__, "");
+       err = igt_live_test_begin(&t, i915, __func__, "");
        if (err)
                goto out_unlock;
 
@@ -722,7 +920,7 @@ static int live_all_engines(void *arg)
                request[id] = NULL;
        }
 
-       err = end_live_test(&t);
+       err = igt_live_test_end(&t);
 
 out_request:
        for_each_engine(engine, i915, id)
@@ -731,7 +929,7 @@ out_request:
        i915_vma_unpin(batch);
        i915_vma_put(batch);
 out_unlock:
-       intel_runtime_pm_put(i915);
+       intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
 }
@@ -742,7 +940,8 @@ static int live_sequential_engines(void *arg)
        struct i915_request *request[I915_NUM_ENGINES] = {};
        struct i915_request *prev = NULL;
        struct intel_engine_cs *engine;
-       struct live_test t;
+       intel_wakeref_t wakeref;
+       struct igt_live_test t;
        unsigned int id;
        int err;
 
@@ -753,9 +952,9 @@ static int live_sequential_engines(void *arg)
         */
 
        mutex_lock(&i915->drm.struct_mutex);
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
-       err = begin_live_test(&t, i915, __func__, "");
+       err = igt_live_test_begin(&t, i915, __func__, "");
        if (err)
                goto out_unlock;
 
@@ -838,7 +1037,7 @@ static int live_sequential_engines(void *arg)
                GEM_BUG_ON(!i915_request_completed(request[id]));
        }
 
-       err = end_live_test(&t);
+       err = igt_live_test_end(&t);
 
 out_request:
        for_each_engine(engine, i915, id) {
@@ -860,11 +1059,183 @@ out_request:
                i915_request_put(request[id]);
        }
 out_unlock:
-       intel_runtime_pm_put(i915);
+       intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
 }
 
+static int
+max_batches(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
+{
+       struct i915_request *rq;
+       int ret;
+
+       /*
+        * Before execlists, all contexts share the same ringbuffer. With
+        * execlists, each context/engine has a separate ringbuffer and
+        * for the purposes of this test, inexhaustible.
+        *
+        * For the global ringbuffer though, we have to be very careful
+        * that we do not wrap while preventing the execution of requests
+        * with an unsignaled fence.
+        */
+       if (HAS_EXECLISTS(ctx->i915))
+               return INT_MAX;
+
+       rq = i915_request_alloc(engine, ctx);
+       if (IS_ERR(rq)) {
+               ret = PTR_ERR(rq);
+       } else {
+               int sz;
+
+               ret = rq->ring->size - rq->reserved_space;
+               i915_request_add(rq);
+
+               sz = rq->ring->emit - rq->head;
+               if (sz < 0)
+                       sz += rq->ring->size;
+               ret /= sz;
+               ret /= 2; /* leave half spare, in case of emergency! */
+       }
+
+       return ret;
+}
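With purely illustrative numbers for the legacy (non-execlists) path: a 16 KiB global ring with 160 bytes of reserved space leaves 16224 usable bytes; if the sample nop request emitted above occupies 192 bytes, that permits 16224 / 192 = 84 requests in flight, halved to 42 so that queuing requests behind an unsignaled fence can never wrap the shared ring. On execlists hardware the cap is simply INT_MAX, since each context/engine owns its own ringbuffer.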
+
+static int live_breadcrumbs_smoketest(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       struct smoketest t[I915_NUM_ENGINES];
+       unsigned int ncpus = num_online_cpus();
+       unsigned long num_waits, num_fences;
+       struct intel_engine_cs *engine;
+       struct task_struct **threads;
+       struct igt_live_test live;
+       enum intel_engine_id id;
+       intel_wakeref_t wakeref;
+       struct drm_file *file;
+       unsigned int n;
+       int ret = 0;
+
+       /*
+        * Smoketest our breadcrumb/signal handling for requests across multiple
+        * threads. A very simple test to catch only the most egregious of bugs.
+        * See __igt_breadcrumbs_smoketest().
+        *
+        * On real hardware this time.
+        */
+
+       wakeref = intel_runtime_pm_get(i915);
+
+       file = mock_file(i915);
+       if (IS_ERR(file)) {
+               ret = PTR_ERR(file);
+               goto out_rpm;
+       }
+
+       threads = kcalloc(ncpus * I915_NUM_ENGINES,
+                         sizeof(*threads),
+                         GFP_KERNEL);
+       if (!threads) {
+               ret = -ENOMEM;
+               goto out_file;
+       }
+
+       memset(&t[0], 0, sizeof(t[0]));
+       t[0].request_alloc = __live_request_alloc;
+       t[0].ncontexts = 64;
+       t[0].contexts = kmalloc_array(t[0].ncontexts,
+                                     sizeof(*t[0].contexts),
+                                     GFP_KERNEL);
+       if (!t[0].contexts) {
+               ret = -ENOMEM;
+               goto out_threads;
+       }
+
+       mutex_lock(&i915->drm.struct_mutex);
+       for (n = 0; n < t[0].ncontexts; n++) {
+               t[0].contexts[n] = live_context(i915, file);
+               if (!t[0].contexts[n]) {
+                       ret = -ENOMEM;
+                       goto out_contexts;
+               }
+       }
+
+       ret = igt_live_test_begin(&live, i915, __func__, "");
+       if (ret)
+               goto out_contexts;
+
+       for_each_engine(engine, i915, id) {
+               t[id] = t[0];
+               t[id].engine = engine;
+               t[id].max_batch = max_batches(t[0].contexts[0], engine);
+               if (t[id].max_batch < 0) {
+                       ret = t[id].max_batch;
+                       mutex_unlock(&i915->drm.struct_mutex);
+                       goto out_flush;
+               }
+               /* One ring interleaved between requests from all cpus */
+               t[id].max_batch /= num_online_cpus() + 1;
+               pr_debug("Limiting batches to %d requests on %s\n",
+                        t[id].max_batch, engine->name);
+
+               for (n = 0; n < ncpus; n++) {
+                       struct task_struct *tsk;
+
+                       tsk = kthread_run(__igt_breadcrumbs_smoketest,
+                                         &t[id], "igt/%d.%d", id, n);
+                       if (IS_ERR(tsk)) {
+                               ret = PTR_ERR(tsk);
+                               mutex_unlock(&i915->drm.struct_mutex);
+                               goto out_flush;
+                       }
+
+                       get_task_struct(tsk);
+                       threads[id * ncpus + n] = tsk;
+               }
+       }
+       mutex_unlock(&i915->drm.struct_mutex);
+
+       msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
+
+out_flush:
+       num_waits = 0;
+       num_fences = 0;
+       for_each_engine(engine, i915, id) {
+               for (n = 0; n < ncpus; n++) {
+                       struct task_struct *tsk = threads[id * ncpus + n];
+                       int err;
+
+                       if (!tsk)
+                               continue;
+
+                       err = kthread_stop(tsk);
+                       if (err < 0 && !ret)
+                               ret = err;
+
+                       put_task_struct(tsk);
+               }
+
+               num_waits += atomic_long_read(&t[id].num_waits);
+               num_fences += atomic_long_read(&t[id].num_fences);
+       }
+       pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n",
+               num_waits, num_fences, RUNTIME_INFO(i915)->num_rings, ncpus);
+
+       mutex_lock(&i915->drm.struct_mutex);
+       ret = igt_live_test_end(&live) ?: ret;
+out_contexts:
+       mutex_unlock(&i915->drm.struct_mutex);
+       kfree(t[0].contexts);
+out_threads:
+       kfree(threads);
+out_file:
+       mock_file_free(i915, file);
+out_rpm:
+       intel_runtime_pm_put(i915, wakeref);
+
+       return ret;
+}
+
 int i915_request_live_selftests(struct drm_i915_private *i915)
 {
        static const struct i915_subtest tests[] = {
@@ -872,6 +1243,7 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
                SUBTEST(live_all_engines),
                SUBTEST(live_sequential_engines),
                SUBTEST(live_empty_request),
+               SUBTEST(live_breadcrumbs_smoketest),
        };
 
        if (i915_terminally_wedged(&i915->gpu_error))
index 86c54ea37f488c99847416a98db8fc0f9d4db033..10ef0e636a247c985db45409f158800f310ad3cb 100644 (file)
@@ -197,6 +197,49 @@ int i915_live_selftests(struct pci_dev *pdev)
        return 0;
 }
 
+static bool apply_subtest_filter(const char *caller, const char *name)
+{
+       char *filter, *sep, *tok;
+       bool result = true;
+
+       filter = kstrdup(i915_selftest.filter, GFP_KERNEL);
+       for (sep = filter; (tok = strsep(&sep, ","));) {
+               bool allow = true;
+               char *sl;
+
+               if (*tok == '!') {
+                       allow = false;
+                       tok++;
+               }
+
+               if (*tok == '\0')
+                       continue;
+
+               sl = strchr(tok, '/');
+               if (sl) {
+                       *sl++ = '\0';
+                       if (strcmp(tok, caller)) {
+                               if (allow)
+                                       result = false;
+                               continue;
+                       }
+                       tok = sl;
+               }
+
+               if (strcmp(tok, name)) {
+                       if (allow)
+                               result = false;
+                       continue;
+               }
+
+               result = allow;
+               break;
+       }
+       kfree(filter);
+
+       return result;
+}
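The filter grammar is a comma-separated list of subtest names, each optionally prefixed with '!' to exclude and optionally qualified as group/subtest; the first matching token wins, and any positive token turns the list into an allow-list. Hypothetical invocations, assuming the group name is the __func__ that i915_subtests() passes through (e.g. i915_request_live_selftests):

    # run only the mock breadcrumbs smoketest
    i915.st_filter=mock_breadcrumbs_smoketest

    # run every live request test except the smoketest
    i915.st_filter=!i915_request_live_selftests/live_breadcrumbs_smoketest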
+
 int __i915_subtests(const char *caller,
                    const struct i915_subtest *st,
                    unsigned int count,
@@ -209,6 +252,9 @@ int __i915_subtests(const char *caller,
                if (signal_pending(current))
                        return -EINTR;
 
+               if (!apply_subtest_filter(caller, st->name))
+                       continue;
+
                pr_debug(DRIVER_NAME ": Running %s/%s\n", caller, st->name);
                GEM_TRACE("Running %s/%s\n", caller, st->name);
 
@@ -244,6 +290,7 @@ bool __igt_timeout(unsigned long timeout, const char *fmt, ...)
 
 module_param_named(st_random_seed, i915_selftest.random_seed, uint, 0400);
 module_param_named(st_timeout, i915_selftest.timeout_ms, uint, 0400);
+module_param_named(st_filter, i915_selftest.filter, charp, 0400);
 
 module_param_named_unsafe(mock_selftests, i915_selftest.mock, int, 0400);
 MODULE_PARM_DESC(mock_selftests, "Run selftests before loading, using mock hardware (0:disabled [default], 1:run tests then load driver, -1:run tests then exit module)");
index 19f1c6a5c8fbc79550e9b2c56314baa3abde9080..12ea69b1a1e570d993f81e5fab2ca8f615e68071 100644 (file)
  * Copyright © 2017-2018 Intel Corporation
  */
 
+#include <linux/prime_numbers.h>
+
 #include "../i915_selftest.h"
 #include "i915_random.h"
 
+#include "igt_flush_test.h"
 #include "mock_gem_device.h"
 #include "mock_timeline.h"
 
+static struct page *hwsp_page(struct i915_timeline *tl)
+{
+       struct drm_i915_gem_object *obj = tl->hwsp_ggtt->obj;
+
+       GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+       return sg_page(obj->mm.pages->sgl);
+}
+
+static unsigned long hwsp_cacheline(struct i915_timeline *tl)
+{
+       unsigned long address = (unsigned long)page_address(hwsp_page(tl));
+
+       return (address + tl->hwsp_offset) / CACHELINE_BYTES;
+}
+
+#define CACHELINES_PER_PAGE (PAGE_SIZE / CACHELINE_BYTES)
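On a typical configuration (PAGE_SIZE = 4096, CACHELINE_BYTES = 64) this gives CACHELINES_PER_PAGE = 64: each HWSP page holds 64 seqno slots, and hwsp_cacheline() maps a timeline to the global index of its slot. The mock test below walks prime counts up to 2 * 64 = 128 live timelines, guaranteeing that allocation must spill past a single HWSP page.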
+
+struct mock_hwsp_freelist {
+       struct drm_i915_private *i915;
+       struct radix_tree_root cachelines;
+       struct i915_timeline **history;
+       unsigned long count, max;
+       struct rnd_state prng;
+};
+
+enum {
+       SHUFFLE = BIT(0),
+};
+
+static void __mock_hwsp_record(struct mock_hwsp_freelist *state,
+                              unsigned int idx,
+                              struct i915_timeline *tl)
+{
+       tl = xchg(&state->history[idx], tl);
+       if (tl) {
+               radix_tree_delete(&state->cachelines, hwsp_cacheline(tl));
+               i915_timeline_put(tl);
+       }
+}
+
+static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
+                               unsigned int count,
+                               unsigned int flags)
+{
+       struct i915_timeline *tl;
+       unsigned int idx;
+
+       while (count--) {
+               unsigned long cacheline;
+               int err;
+
+               tl = i915_timeline_create(state->i915, "mock", NULL);
+               if (IS_ERR(tl))
+                       return PTR_ERR(tl);
+
+               cacheline = hwsp_cacheline(tl);
+               err = radix_tree_insert(&state->cachelines, cacheline, tl);
+               if (err) {
+                       if (err == -EEXIST) {
+                               pr_err("HWSP cacheline %lu already used; duplicate allocation!\n",
+                                      cacheline);
+                       }
+                       i915_timeline_put(tl);
+                       return err;
+               }
+
+               idx = state->count++ % state->max;
+               __mock_hwsp_record(state, idx, tl);
+       }
+
+       if (flags & SHUFFLE)
+               i915_prandom_shuffle(state->history,
+                                    sizeof(*state->history),
+                                    min(state->count, state->max),
+                                    &state->prng);
+
+       count = i915_prandom_u32_max_state(min(state->count, state->max),
+                                          &state->prng);
+       while (count--) {
+               idx = --state->count % state->max;
+               __mock_hwsp_record(state, idx, NULL);
+       }
+
+       return 0;
+}
+
+static int mock_hwsp_freelist(void *arg)
+{
+       struct mock_hwsp_freelist state;
+       const struct {
+               const char *name;
+               unsigned int flags;
+       } phases[] = {
+               { "linear", 0 },
+               { "shuffled", SHUFFLE },
+               { },
+       }, *p;
+       unsigned int na;
+       int err = 0;
+
+       INIT_RADIX_TREE(&state.cachelines, GFP_KERNEL);
+       state.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed);
+
+       state.i915 = mock_gem_device();
+       if (!state.i915)
+               return -ENOMEM;
+
+       /*
+        * Create a bunch of timelines and check that their HWSPs do not overlap.
+        * Free some, and try again.
+        */
+
+       state.max = PAGE_SIZE / sizeof(*state.history);
+       state.count = 0;
+       state.history = kcalloc(state.max, sizeof(*state.history), GFP_KERNEL);
+       if (!state.history) {
+               err = -ENOMEM;
+               goto err_put;
+       }
+
+       mutex_lock(&state.i915->drm.struct_mutex);
+       for (p = phases; p->name; p++) {
+               pr_debug("%s(%s)\n", __func__, p->name);
+               for_each_prime_number_from(na, 1, 2 * CACHELINES_PER_PAGE) {
+                       err = __mock_hwsp_timeline(&state, na, p->flags);
+                       if (err)
+                               goto out;
+               }
+       }
+
+out:
+       for (na = 0; na < state.max; na++)
+               __mock_hwsp_record(&state, na, NULL);
+       mutex_unlock(&state.i915->drm.struct_mutex);
+       kfree(state.history);
+err_put:
+       drm_dev_put(&state.i915->drm);
+       return err;
+}
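The radix tree here serves purely as a "seen set" keyed by cacheline index: radix_tree_insert() fails with -EEXIST if two live timelines ever map to the same HWSP slot. The same trick in isolation, with mark_used() as an illustrative name:

    #include <linux/printk.h>
    #include <linux/radix-tree.h>

    static RADIX_TREE(used_cachelines, GFP_KERNEL);

    /* Returns -EEXIST if the cacheline was already claimed. */
    static int mark_used(unsigned long cacheline, void *owner)
    {
            int err = radix_tree_insert(&used_cachelines, cacheline, owner);

            if (err == -EEXIST)
                    pr_err("HWSP cacheline %lu already in use!\n", cacheline);

            return err;
    }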
+
 struct __igt_sync {
        const char *name;
        u32 seqno;
@@ -256,12 +399,331 @@ static int bench_sync(void *arg)
        return 0;
 }
 
-int i915_gem_timeline_mock_selftests(void)
+int i915_timeline_mock_selftests(void)
 {
        static const struct i915_subtest tests[] = {
+               SUBTEST(mock_hwsp_freelist),
                SUBTEST(igt_sync),
                SUBTEST(bench_sync),
        };
 
        return i915_subtests(tests, NULL);
 }
+
+static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
+{
+       u32 *cs;
+
+       cs = intel_ring_begin(rq, 4);
+       if (IS_ERR(cs))
+               return PTR_ERR(cs);
+
+       if (INTEL_GEN(rq->i915) >= 8) {
+               *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+               *cs++ = addr;
+               *cs++ = 0;
+               *cs++ = value;
+       } else if (INTEL_GEN(rq->i915) >= 4) {
+               *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+               *cs++ = 0;
+               *cs++ = addr;
+               *cs++ = value;
+       } else {
+               *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+               *cs++ = addr;
+               *cs++ = value;
+               *cs++ = MI_NOOP;
+       }
+
+       intel_ring_advance(rq, cs);
+
+       return 0;
+}
+
+static struct i915_request *
+tl_write(struct i915_timeline *tl, struct intel_engine_cs *engine, u32 value)
+{
+       struct i915_request *rq;
+       int err;
+
+       lockdep_assert_held(&tl->i915->drm.struct_mutex); /* lazy rq refs */
+
+       err = i915_timeline_pin(tl);
+       if (err) {
+               rq = ERR_PTR(err);
+               goto out;
+       }
+
+       rq = i915_request_alloc(engine, engine->i915->kernel_context);
+       if (IS_ERR(rq))
+               goto out_unpin;
+
+       err = emit_ggtt_store_dw(rq, tl->hwsp_offset, value);
+       i915_request_add(rq);
+       if (err)
+               rq = ERR_PTR(err);
+
+out_unpin:
+       i915_timeline_unpin(tl);
+out:
+       if (IS_ERR(rq))
+               pr_err("Failed to write to timeline!\n");
+       return rq;
+}
+
+static struct i915_timeline *
+checked_i915_timeline_create(struct drm_i915_private *i915)
+{
+       struct i915_timeline *tl;
+
+       tl = i915_timeline_create(i915, "live", NULL);
+       if (IS_ERR(tl))
+               return tl;
+
+       if (*tl->hwsp_seqno != tl->seqno) {
+               pr_err("Timeline created with incorrect breadcrumb, found %x, expected %x\n",
+                      *tl->hwsp_seqno, tl->seqno);
+               i915_timeline_put(tl);
+               return ERR_PTR(-EINVAL);
+       }
+
+       return tl;
+}
+
+static int live_hwsp_engine(void *arg)
+{
+#define NUM_TIMELINES 4096
+       struct drm_i915_private *i915 = arg;
+       struct i915_timeline **timelines;
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+       intel_wakeref_t wakeref;
+       unsigned long count, n;
+       int err = 0;
+
+       /*
+        * Create a bunch of timelines and check we can write
+        * independently to each of their breadcrumb slots.
+        */
+
+       timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES,
+                                  sizeof(*timelines),
+                                  GFP_KERNEL);
+       if (!timelines)
+               return -ENOMEM;
+
+       mutex_lock(&i915->drm.struct_mutex);
+       wakeref = intel_runtime_pm_get(i915);
+
+       count = 0;
+       for_each_engine(engine, i915, id) {
+               if (!intel_engine_can_store_dword(engine))
+                       continue;
+
+               for (n = 0; n < NUM_TIMELINES; n++) {
+                       struct i915_timeline *tl;
+                       struct i915_request *rq;
+
+                       tl = checked_i915_timeline_create(i915);
+                       if (IS_ERR(tl)) {
+                               err = PTR_ERR(tl);
+                               goto out;
+                       }
+
+                       rq = tl_write(tl, engine, count);
+                       if (IS_ERR(rq)) {
+                               i915_timeline_put(tl);
+                               err = PTR_ERR(rq);
+                               goto out;
+                       }
+
+                       timelines[count++] = tl;
+               }
+       }
+
+out:
+       if (igt_flush_test(i915, I915_WAIT_LOCKED))
+               err = -EIO;
+
+       for (n = 0; n < count; n++) {
+               struct i915_timeline *tl = timelines[n];
+
+               if (!err && *tl->hwsp_seqno != n) {
+                       pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
+                              n, *tl->hwsp_seqno);
+                       err = -EINVAL;
+               }
+               i915_timeline_put(tl);
+       }
+
+       intel_runtime_pm_put(i915, wakeref);
+       mutex_unlock(&i915->drm.struct_mutex);
+
+       kvfree(timelines);
+
+       return err;
+#undef NUM_TIMELINES
+}
+
+static int live_hwsp_alternate(void *arg)
+{
+#define NUM_TIMELINES 4096
+       struct drm_i915_private *i915 = arg;
+       struct i915_timeline **timelines;
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+       intel_wakeref_t wakeref;
+       unsigned long count, n;
+       int err = 0;
+
+       /*
+        * Create a bunch of timelines and check we can write
+        * independently to each of their breadcrumb slots with adjacent
+        * engines.
+        */
+
+       timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES,
+                                  sizeof(*timelines),
+                                  GFP_KERNEL);
+       if (!timelines)
+               return -ENOMEM;
+
+       mutex_lock(&i915->drm.struct_mutex);
+       wakeref = intel_runtime_pm_get(i915);
+
+       count = 0;
+       for (n = 0; n < NUM_TIMELINES; n++) {
+               for_each_engine(engine, i915, id) {
+                       struct i915_timeline *tl;
+                       struct i915_request *rq;
+
+                       if (!intel_engine_can_store_dword(engine))
+                               continue;
+
+                       tl = checked_i915_timeline_create(i915);
+                       if (IS_ERR(tl)) {
+                               err = PTR_ERR(tl);
+                               goto out;
+                       }
+
+                       rq = tl_write(tl, engine, count);
+                       if (IS_ERR(rq)) {
+                               i915_timeline_put(tl);
+                               err = PTR_ERR(rq);
+                               goto out;
+                       }
+
+                       timelines[count++] = tl;
+               }
+       }
+
+out:
+       if (igt_flush_test(i915, I915_WAIT_LOCKED))
+               err = -EIO;
+
+       for (n = 0; n < count; n++) {
+               struct i915_timeline *tl = timelines[n];
+
+               if (!err && *tl->hwsp_seqno != n) {
+                       pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
+                              n, *tl->hwsp_seqno);
+                       err = -EINVAL;
+               }
+               i915_timeline_put(tl);
+       }
+
+       intel_runtime_pm_put(i915, wakeref);
+       mutex_unlock(&i915->drm.struct_mutex);
+
+       kvfree(timelines);
+
+       return err;
+#undef NUM_TIMELINES
+}
+
+static int live_hwsp_recycle(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+       intel_wakeref_t wakeref;
+       unsigned long count;
+       int err = 0;
+
+       /*
+        * Check seqno writes into one timeline at a time. We expect to
+        * recycle the breadcrumb slot between iterations and neither
+        * want to confuse ourselves or the GPU.
+        */
+
+       mutex_lock(&i915->drm.struct_mutex);
+       wakeref = intel_runtime_pm_get(i915);
+
+       count = 0;
+       for_each_engine(engine, i915, id) {
+               IGT_TIMEOUT(end_time);
+
+               if (!intel_engine_can_store_dword(engine))
+                       continue;
+
+               do {
+                       struct i915_timeline *tl;
+                       struct i915_request *rq;
+
+                       tl = checked_i915_timeline_create(i915);
+                       if (IS_ERR(tl)) {
+                               err = PTR_ERR(tl);
+                               goto out;
+                       }
+
+                       rq = tl_write(tl, engine, count);
+                       if (IS_ERR(rq)) {
+                               i915_timeline_put(tl);
+                               err = PTR_ERR(rq);
+                               goto out;
+                       }
+
+                       if (i915_request_wait(rq,
+                                             I915_WAIT_LOCKED,
+                                             HZ / 5) < 0) {
+                               pr_err("Wait for timeline writes timed out!\n");
+                               i915_timeline_put(tl);
+                               err = -EIO;
+                               goto out;
+                       }
+
+                       if (*tl->hwsp_seqno != count) {
+                               pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
+                                      count, *tl->hwsp_seqno);
+                               err = -EINVAL;
+                       }
+
+                       i915_timeline_put(tl);
+                       count++;
+
+                       if (err)
+                               goto out;
+
+                       i915_timelines_park(i915); /* Encourage recycling! */
+               } while (!__igt_timeout(end_time, NULL));
+       }
+
+out:
+       if (igt_flush_test(i915, I915_WAIT_LOCKED))
+               err = -EIO;
+       intel_runtime_pm_put(i915, wakeref);
+       mutex_unlock(&i915->drm.struct_mutex);
+
+       return err;
+}
+
+int i915_timeline_live_selftests(struct drm_i915_private *i915)
+{
+       static const struct i915_subtest tests[] = {
+               SUBTEST(live_hwsp_recycle),
+               SUBTEST(live_hwsp_engine),
+               SUBTEST(live_hwsp_alternate),
+       };
+
+       return i915_subtests(tests, i915);
+}
index ffa74290e0547a77f762a890b992277f2459419f..cf1de82741fa7c32bcb523f51dacfd46bd4a1ea9 100644 (file)
@@ -28,6 +28,7 @@
 
 #include "mock_gem_device.h"
 #include "mock_context.h"
+#include "mock_gtt.h"
 
 static bool assert_vma(struct i915_vma *vma,
                       struct drm_i915_gem_object *obj,
@@ -141,7 +142,8 @@ static int create_vmas(struct drm_i915_private *i915,
 
 static int igt_vma_create(void *arg)
 {
-       struct drm_i915_private *i915 = arg;
+       struct i915_ggtt *ggtt = arg;
+       struct drm_i915_private *i915 = ggtt->vm.i915;
        struct drm_i915_gem_object *obj, *on;
        struct i915_gem_context *ctx, *cn;
        unsigned long num_obj, num_ctx;
@@ -245,7 +247,7 @@ static bool assert_pin_einval(const struct i915_vma *vma,
 
 static int igt_vma_pin1(void *arg)
 {
-       struct drm_i915_private *i915 = arg;
+       struct i915_ggtt *ggtt = arg;
        const struct pin_mode modes[] = {
 #define VALID(sz, fl) { .size = (sz), .flags = (fl), .assert = assert_pin_valid, .string = #sz ", " #fl ", (valid) " }
 #define __INVALID(sz, fl, check, eval) { .size = (sz), .flags = (fl), .assert = (check), .string = #sz ", " #fl ", (invalid " #eval ")" }
@@ -256,30 +258,30 @@ static int igt_vma_pin1(void *arg)
 
                VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 4096),
                VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 8192),
-               VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
-               VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
-               VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)),
-
-               VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
-               INVALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | i915->ggtt.mappable_end),
-               VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)),
-               INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | i915->ggtt.vm.total),
+               VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (ggtt->mappable_end - 4096)),
+               VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (ggtt->mappable_end - 4096)),
+               VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (ggtt->vm.total - 4096)),
+
+               VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (ggtt->mappable_end - 4096)),
+               INVALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | ggtt->mappable_end),
+               VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (ggtt->vm.total - 4096)),
+               INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | ggtt->vm.total),
                INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | round_down(U64_MAX, PAGE_SIZE)),
 
                VALID(4096, PIN_GLOBAL),
                VALID(8192, PIN_GLOBAL),
-               VALID(i915->ggtt.mappable_end - 4096, PIN_GLOBAL | PIN_MAPPABLE),
-               VALID(i915->ggtt.mappable_end, PIN_GLOBAL | PIN_MAPPABLE),
-               NOSPACE(i915->ggtt.mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE),
-               VALID(i915->ggtt.vm.total - 4096, PIN_GLOBAL),
-               VALID(i915->ggtt.vm.total, PIN_GLOBAL),
-               NOSPACE(i915->ggtt.vm.total + 4096, PIN_GLOBAL),
+               VALID(ggtt->mappable_end - 4096, PIN_GLOBAL | PIN_MAPPABLE),
+               VALID(ggtt->mappable_end, PIN_GLOBAL | PIN_MAPPABLE),
+               NOSPACE(ggtt->mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE),
+               VALID(ggtt->vm.total - 4096, PIN_GLOBAL),
+               VALID(ggtt->vm.total, PIN_GLOBAL),
+               NOSPACE(ggtt->vm.total + 4096, PIN_GLOBAL),
                NOSPACE(round_down(U64_MAX, PAGE_SIZE), PIN_GLOBAL),
-               INVALID(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
-               INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)),
+               INVALID(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (ggtt->mappable_end - 4096)),
+               INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (ggtt->vm.total - 4096)),
                INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (round_down(U64_MAX, PAGE_SIZE) - 4096)),
 
-               VALID(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
+               VALID(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (ggtt->mappable_end - 4096)),
 
 #if !IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
                /* Misusing BIAS is a programming error (it is not controllable
@@ -287,10 +289,10 @@ static int igt_vma_pin1(void *arg)
                 * However, the tests are still quite interesting for checking
                 * variable start, end and size.
                 */
-               NOSPACE(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | i915->ggtt.mappable_end),
-               NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.vm.total),
-               NOSPACE(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
-               NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)),
+               NOSPACE(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | ggtt->mappable_end),
+               NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | ggtt->vm.total),
+               NOSPACE(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (ggtt->mappable_end - 4096)),
+               NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (ggtt->vm.total - 4096)),
 #endif
                { },
 #undef NOSPACE
@@ -306,13 +308,13 @@ static int igt_vma_pin1(void *arg)
         * focusing on error handling of boundary conditions.
         */
 
-       GEM_BUG_ON(!drm_mm_clean(&i915->ggtt.vm.mm));
+       GEM_BUG_ON(!drm_mm_clean(&ggtt->vm.mm));
 
-       obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+       obj = i915_gem_object_create_internal(ggtt->vm.i915, PAGE_SIZE);
        if (IS_ERR(obj))
                return PTR_ERR(obj);
 
-       vma = checked_vma_instance(obj, &i915->ggtt.vm, NULL);
+       vma = checked_vma_instance(obj, &ggtt->vm, NULL);
        if (IS_ERR(vma))
                goto out;
 
@@ -403,8 +405,8 @@ static unsigned int rotated_size(const struct intel_rotation_plane_info *a,
 
 static int igt_vma_rotate(void *arg)
 {
-       struct drm_i915_private *i915 = arg;
-       struct i915_address_space *vm = &i915->ggtt.vm;
+       struct i915_ggtt *ggtt = arg;
+       struct i915_address_space *vm = &ggtt->vm;
        struct drm_i915_gem_object *obj;
        const struct intel_rotation_plane_info planes[] = {
                { .width = 1, .height = 1, .stride = 1 },
@@ -431,7 +433,7 @@ static int igt_vma_rotate(void *arg)
         * that the page layout within the rotated VMA match our expectations.
         */
 
-       obj = i915_gem_object_create_internal(i915, max_pages * PAGE_SIZE);
+       obj = i915_gem_object_create_internal(vm->i915, max_pages * PAGE_SIZE);
        if (IS_ERR(obj))
                goto out;
 
@@ -602,8 +604,8 @@ static bool assert_pin(struct i915_vma *vma,
 
 static int igt_vma_partial(void *arg)
 {
-       struct drm_i915_private *i915 = arg;
-       struct i915_address_space *vm = &i915->ggtt.vm;
+       struct i915_ggtt *ggtt = arg;
+       struct i915_address_space *vm = &ggtt->vm;
        const unsigned int npages = 1021; /* prime! */
        struct drm_i915_gem_object *obj;
        const struct phase {
@@ -621,7 +623,7 @@ static int igt_vma_partial(void *arg)
         * we are returned the same VMA when we later request the same range.
         */
 
-       obj = i915_gem_object_create_internal(i915, npages*PAGE_SIZE);
+       obj = i915_gem_object_create_internal(vm->i915, npages * PAGE_SIZE);
        if (IS_ERR(obj))
                goto out;
 
@@ -670,7 +672,7 @@ static int igt_vma_partial(void *arg)
                }
 
                count = 0;
-               list_for_each_entry(vma, &obj->vma_list, obj_link)
+               list_for_each_entry(vma, &obj->vma.list, obj_link)
                        count++;
                if (count != nvma) {
                        pr_err("(%s) All partial vma were not recorded on the obj->vma_list: found %u, expected %u\n",
@@ -699,7 +701,7 @@ static int igt_vma_partial(void *arg)
                i915_vma_unpin(vma);
 
                count = 0;
-               list_for_each_entry(vma, &obj->vma_list, obj_link)
+               list_for_each_entry(vma, &obj->vma.list, obj_link)
                        count++;
                if (count != nvma) {
                        pr_err("(%s) allocated an extra full vma!\n", p->name);
@@ -723,17 +725,24 @@ int i915_vma_mock_selftests(void)
                SUBTEST(igt_vma_partial),
        };
        struct drm_i915_private *i915;
+       struct i915_ggtt ggtt;
        int err;
 
        i915 = mock_gem_device();
        if (!i915)
                return -ENOMEM;
 
+       mock_init_ggtt(i915, &ggtt);
+
        mutex_lock(&i915->drm.struct_mutex);
-       err = i915_subtests(tests, i915);
+       err = i915_subtests(tests, &ggtt);
+       mock_device_flush(i915);
        mutex_unlock(&i915->drm.struct_mutex);
 
+       i915_gem_drain_freed_objects(i915);
+
+       mock_fini_ggtt(&ggtt);
        drm_dev_put(&i915->drm);
+
        return err;
 }
-
diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.c b/drivers/gpu/drm/i915/selftests/igt_live_test.c
new file mode 100644 (file)
index 0000000..3e90276
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "../i915_drv.h"
+
+#include "../i915_selftest.h"
+#include "igt_flush_test.h"
+#include "igt_live_test.h"
+
+int igt_live_test_begin(struct igt_live_test *t,
+                       struct drm_i915_private *i915,
+                       const char *func,
+                       const char *name)
+{
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+       int err;
+
+       lockdep_assert_held(&i915->drm.struct_mutex);
+
+       t->i915 = i915;
+       t->func = func;
+       t->name = name;
+
+       err = i915_gem_wait_for_idle(i915,
+                                    I915_WAIT_INTERRUPTIBLE |
+                                    I915_WAIT_LOCKED,
+                                    MAX_SCHEDULE_TIMEOUT);
+       if (err) {
+               pr_err("%s(%s): failed to idle before, with err=%d!",
+                      func, name, err);
+               return err;
+       }
+
+       t->reset_global = i915_reset_count(&i915->gpu_error);
+
+       for_each_engine(engine, i915, id)
+               t->reset_engine[id] =
+                       i915_reset_engine_count(&i915->gpu_error, engine);
+
+       return 0;
+}
+
+int igt_live_test_end(struct igt_live_test *t)
+{
+       struct drm_i915_private *i915 = t->i915;
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+
+       lockdep_assert_held(&i915->drm.struct_mutex);
+
+       if (igt_flush_test(i915, I915_WAIT_LOCKED))
+               return -EIO;
+
+       if (t->reset_global != i915_reset_count(&i915->gpu_error)) {
+               pr_err("%s(%s): GPU was reset %d times!\n",
+                      t->func, t->name,
+                      i915_reset_count(&i915->gpu_error) - t->reset_global);
+               return -EIO;
+       }
+
+       for_each_engine(engine, i915, id) {
+               if (t->reset_engine[id] ==
+                   i915_reset_engine_count(&i915->gpu_error, engine))
+                       continue;
+
+               pr_err("%s(%s): engine '%s' was reset %d times!\n",
+                      t->func, t->name, engine->name,
+                      i915_reset_engine_count(&i915->gpu_error, engine) -
+                      t->reset_engine[id]);
+               return -EIO;
+       }
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.h b/drivers/gpu/drm/i915/selftests/igt_live_test.h
new file mode 100644 (file)
index 0000000..c0e9f99
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef IGT_LIVE_TEST_H
+#define IGT_LIVE_TEST_H
+
+#include "../i915_gem.h"
+
+struct drm_i915_private;
+
+struct igt_live_test {
+       struct drm_i915_private *i915;
+       const char *func;
+       const char *name;
+
+       unsigned int reset_global;
+       unsigned int reset_engine[I915_NUM_ENGINES];
+};
+
+/*
+ * Flush the GPU state before and after the test to ensure that no residual
+ * code is running on the GPU that may affect this test. Also compare the
+ * state before and after the test and alert if it unexpectedly changes,
+ * e.g. if the GPU was reset.
+ */
+int igt_live_test_begin(struct igt_live_test *t,
+                       struct drm_i915_private *i915,
+                       const char *func,
+                       const char *name);
+int igt_live_test_end(struct igt_live_test *t);
+
+#endif /* IGT_LIVE_TEST_H */
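Typical usage brackets a test body between begin and end with struct_mutex held, exactly as the request selftests above were converted to do; live_example() is a hypothetical caller:

    static int live_example(struct drm_i915_private *i915)
    {
            struct igt_live_test t;
            int err;

            mutex_lock(&i915->drm.struct_mutex);

            err = igt_live_test_begin(&t, i915, __func__, "example");
            if (err)
                    goto out_unlock;

            /* ... submit and wait upon requests ... */

            err = igt_live_test_end(&t); /* -EIO if the GPU was reset */
    out_unlock:
            mutex_unlock(&i915->drm.struct_mutex);
            return err;
    }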
index 8cd34f6e6859d6882b0c2ac11b78d8566a8996c8..9ebd9225684e8743f2b8db772de4c9d9f7f45465 100644 (file)
@@ -68,48 +68,65 @@ static u64 hws_address(const struct i915_vma *hws,
        return hws->node.start + seqno_offset(rq->fence.context);
 }
 
-static int emit_recurse_batch(struct igt_spinner *spin,
-                             struct i915_request *rq,
-                             u32 arbitration_command)
+static int move_to_active(struct i915_vma *vma,
+                         struct i915_request *rq,
+                         unsigned int flags)
 {
-       struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
+       int err;
+
+       err = i915_vma_move_to_active(vma, rq, flags);
+       if (err)
+               return err;
+
+       if (!i915_gem_object_has_active_reference(vma->obj)) {
+               i915_gem_object_get(vma->obj);
+               i915_gem_object_set_active_reference(vma->obj);
+       }
+
+       return 0;
+}
+
+struct i915_request *
+igt_spinner_create_request(struct igt_spinner *spin,
+                          struct i915_gem_context *ctx,
+                          struct intel_engine_cs *engine,
+                          u32 arbitration_command)
+{
+       struct i915_address_space *vm = &ctx->ppgtt->vm;
+       struct i915_request *rq = NULL;
        struct i915_vma *hws, *vma;
        u32 *batch;
        int err;
 
        vma = i915_vma_instance(spin->obj, vm, NULL);
        if (IS_ERR(vma))
-               return PTR_ERR(vma);
+               return ERR_CAST(vma);
 
        hws = i915_vma_instance(spin->hws, vm, NULL);
        if (IS_ERR(hws))
-               return PTR_ERR(hws);
+               return ERR_CAST(hws);
 
        err = i915_vma_pin(vma, 0, 0, PIN_USER);
        if (err)
-               return err;
+               return ERR_PTR(err);
 
        err = i915_vma_pin(hws, 0, 0, PIN_USER);
        if (err)
                goto unpin_vma;
 
-       err = i915_vma_move_to_active(vma, rq, 0);
-       if (err)
+       rq = i915_request_alloc(engine, ctx);
+       if (IS_ERR(rq)) {
+               err = PTR_ERR(rq);
                goto unpin_hws;
-
-       if (!i915_gem_object_has_active_reference(vma->obj)) {
-               i915_gem_object_get(vma->obj);
-               i915_gem_object_set_active_reference(vma->obj);
        }
 
-       err = i915_vma_move_to_active(hws, rq, 0);
+       err = move_to_active(vma, rq, 0);
        if (err)
-               goto unpin_hws;
+               goto cancel_rq;
 
-       if (!i915_gem_object_has_active_reference(hws->obj)) {
-               i915_gem_object_get(hws->obj);
-               i915_gem_object_set_active_reference(hws->obj);
-       }
+       err = move_to_active(hws, rq, 0);
+       if (err)
+               goto cancel_rq;
 
        batch = spin->batch;
 
@@ -127,35 +144,18 @@ static int emit_recurse_batch(struct igt_spinner *spin,
 
        i915_gem_chipset_flush(spin->i915);
 
-       err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
+       err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
 
+cancel_rq:
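+       /*
+        * On error the request cannot simply be freed: mark its payload as
+        * skipped and submit it so that it is retired cleanly.
+        */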
+       if (err) {
+               i915_request_skip(rq, err);
+               i915_request_add(rq);
+       }
 unpin_hws:
        i915_vma_unpin(hws);
 unpin_vma:
        i915_vma_unpin(vma);
-       return err;
-}
-
-struct i915_request *
-igt_spinner_create_request(struct igt_spinner *spin,
-                          struct i915_gem_context *ctx,
-                          struct intel_engine_cs *engine,
-                          u32 arbitration_command)
-{
-       struct i915_request *rq;
-       int err;
-
-       rq = i915_request_alloc(engine, ctx);
-       if (IS_ERR(rq))
-               return rq;
-
-       err = emit_recurse_batch(spin, rq, arbitration_command);
-       if (err) {
-               i915_request_add(rq);
-               return ERR_PTR(err);
-       }
-
-       return rq;
+       return err ? ERR_PTR(err) : rq;
 }
 
 static u32
@@ -185,11 +185,6 @@ void igt_spinner_fini(struct igt_spinner *spin)
 
 bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
 {
-       if (!wait_event_timeout(rq->execute,
-                               READ_ONCE(rq->global_seqno),
-                               msecs_to_jiffies(10)))
-               return false;
-
        return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
                                               rq->fence.seqno),
                             10) &&
diff --git a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
deleted file mode 100644 (file)
index f03b407..0000000
+++ /dev/null
@@ -1,470 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include "../i915_selftest.h"
-#include "i915_random.h"
-
-#include "mock_gem_device.h"
-#include "mock_engine.h"
-
-static int check_rbtree(struct intel_engine_cs *engine,
-                       const unsigned long *bitmap,
-                       const struct intel_wait *waiters,
-                       const int count)
-{
-       struct intel_breadcrumbs *b = &engine->breadcrumbs;
-       struct rb_node *rb;
-       int n;
-
-       if (&b->irq_wait->node != rb_first(&b->waiters)) {
-               pr_err("First waiter does not match first element of wait-tree\n");
-               return -EINVAL;
-       }
-
-       n = find_first_bit(bitmap, count);
-       for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
-               struct intel_wait *w = container_of(rb, typeof(*w), node);
-               int idx = w - waiters;
-
-               if (!test_bit(idx, bitmap)) {
-                       pr_err("waiter[%d, seqno=%d] removed but still in wait-tree\n",
-                              idx, w->seqno);
-                       return -EINVAL;
-               }
-
-               if (n != idx) {
-                       pr_err("waiter[%d, seqno=%d] does not match expected next element in tree [%d]\n",
-                              idx, w->seqno, n);
-                       return -EINVAL;
-               }
-
-               n = find_next_bit(bitmap, count, n + 1);
-       }
-
-       return 0;
-}
-
-static int check_completion(struct intel_engine_cs *engine,
-                           const unsigned long *bitmap,
-                           const struct intel_wait *waiters,
-                           const int count)
-{
-       int n;
-
-       for (n = 0; n < count; n++) {
-               if (intel_wait_complete(&waiters[n]) != !!test_bit(n, bitmap))
-                       continue;
-
-               pr_err("waiter[%d, seqno=%d] is %s, but expected %s\n",
-                      n, waiters[n].seqno,
-                      intel_wait_complete(&waiters[n]) ? "complete" : "active",
-                      test_bit(n, bitmap) ? "active" : "complete");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int check_rbtree_empty(struct intel_engine_cs *engine)
-{
-       struct intel_breadcrumbs *b = &engine->breadcrumbs;
-
-       if (b->irq_wait) {
-               pr_err("Empty breadcrumbs still has a waiter\n");
-               return -EINVAL;
-       }
-
-       if (!RB_EMPTY_ROOT(&b->waiters)) {
-               pr_err("Empty breadcrumbs, but wait-tree not empty\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int igt_random_insert_remove(void *arg)
-{
-       const u32 seqno_bias = 0x1000;
-       I915_RND_STATE(prng);
-       struct intel_engine_cs *engine = arg;
-       struct intel_wait *waiters;
-       const int count = 4096;
-       unsigned int *order;
-       unsigned long *bitmap;
-       int err = -ENOMEM;
-       int n;
-
-       mock_engine_reset(engine);
-
-       waiters = kvmalloc_array(count, sizeof(*waiters), GFP_KERNEL);
-       if (!waiters)
-               goto out_engines;
-
-       bitmap = kcalloc(DIV_ROUND_UP(count, BITS_PER_LONG), sizeof(*bitmap),
-                        GFP_KERNEL);
-       if (!bitmap)
-               goto out_waiters;
-
-       order = i915_random_order(count, &prng);
-       if (!order)
-               goto out_bitmap;
-
-       for (n = 0; n < count; n++)
-               intel_wait_init_for_seqno(&waiters[n], seqno_bias + n);
-
-       err = check_rbtree(engine, bitmap, waiters, count);
-       if (err)
-               goto out_order;
-
-       /* Add waiters to and remove them from the rbtree in random order.
-        * At each step, we verify that the rbtree is correctly ordered.
-        */
-       for (n = 0; n < count; n++) {
-               int i = order[n];
-
-               intel_engine_add_wait(engine, &waiters[i]);
-               __set_bit(i, bitmap);
-
-               err = check_rbtree(engine, bitmap, waiters, count);
-               if (err)
-                       goto out_order;
-       }
-
-       i915_random_reorder(order, count, &prng);
-       for (n = 0; n < count; n++) {
-               int i = order[n];
-
-               intel_engine_remove_wait(engine, &waiters[i]);
-               __clear_bit(i, bitmap);
-
-               err = check_rbtree(engine, bitmap, waiters, count);
-               if (err)
-                       goto out_order;
-       }
-
-       err = check_rbtree_empty(engine);
-out_order:
-       kfree(order);
-out_bitmap:
-       kfree(bitmap);
-out_waiters:
-       kvfree(waiters);
-out_engines:
-       mock_engine_flush(engine);
-       return err;
-}
-
-static int igt_insert_complete(void *arg)
-{
-       const u32 seqno_bias = 0x1000;
-       struct intel_engine_cs *engine = arg;
-       struct intel_wait *waiters;
-       const int count = 4096;
-       unsigned long *bitmap;
-       int err = -ENOMEM;
-       int n, m;
-
-       mock_engine_reset(engine);
-
-       waiters = kvmalloc_array(count, sizeof(*waiters), GFP_KERNEL);
-       if (!waiters)
-               goto out_engines;
-
-       bitmap = kcalloc(DIV_ROUND_UP(count, BITS_PER_LONG), sizeof(*bitmap),
-                        GFP_KERNEL);
-       if (!bitmap)
-               goto out_waiters;
-
-       for (n = 0; n < count; n++) {
-               intel_wait_init_for_seqno(&waiters[n], n + seqno_bias);
-               intel_engine_add_wait(engine, &waiters[n]);
-               __set_bit(n, bitmap);
-       }
-       err = check_rbtree(engine, bitmap, waiters, count);
-       if (err)
-               goto out_bitmap;
-
-       /* On each step, we advance the seqno so that several waiters are then
-        * complete (we increase the seqno by increasingly larger values to
-        * retire more and more waiters at once). All retired waiters should
-        * be woken and removed from the rbtree, which is what we check.
-        */
-       for (n = 0; n < count; n = m) {
-               int seqno = 2 * n;
-
-               GEM_BUG_ON(find_first_bit(bitmap, count) != n);
-
-               if (intel_wait_complete(&waiters[n])) {
-                       pr_err("waiter[%d, seqno=%d] completed too early\n",
-                              n, waiters[n].seqno);
-                       err = -EINVAL;
-                       goto out_bitmap;
-               }
-
-               /* complete the following waiters */
-               mock_seqno_advance(engine, seqno + seqno_bias);
-               for (m = n; m <= seqno; m++) {
-                       if (m == count)
-                               break;
-
-                       GEM_BUG_ON(!test_bit(m, bitmap));
-                       __clear_bit(m, bitmap);
-               }
-
-               intel_engine_remove_wait(engine, &waiters[n]);
-               RB_CLEAR_NODE(&waiters[n].node);
-
-               err = check_rbtree(engine, bitmap, waiters, count);
-               if (err) {
-                       pr_err("rbtree corrupt after seqno advance to %d\n",
-                              seqno + seqno_bias);
-                       goto out_bitmap;
-               }
-
-               err = check_completion(engine, bitmap, waiters, count);
-               if (err) {
-                       pr_err("completions after seqno advance to %d failed\n",
-                              seqno + seqno_bias);
-                       goto out_bitmap;
-               }
-       }
-
-       err = check_rbtree_empty(engine);
-out_bitmap:
-       kfree(bitmap);
-out_waiters:
-       kvfree(waiters);
-out_engines:
-       mock_engine_flush(engine);
-       return err;
-}
-
-struct igt_wakeup {
-       struct task_struct *tsk;
-       atomic_t *ready, *set, *done;
-       struct intel_engine_cs *engine;
-       unsigned long flags;
-#define STOP 0
-#define IDLE 1
-       wait_queue_head_t *wq;
-       u32 seqno;
-};
-
-static bool wait_for_ready(struct igt_wakeup *w)
-{
-       DEFINE_WAIT(ready);
-
-       set_bit(IDLE, &w->flags);
-       if (atomic_dec_and_test(w->done))
-               wake_up_var(w->done);
-
-       if (test_bit(STOP, &w->flags))
-               goto out;
-
-       for (;;) {
-               prepare_to_wait(w->wq, &ready, TASK_INTERRUPTIBLE);
-               if (atomic_read(w->ready) == 0)
-                       break;
-
-               schedule();
-       }
-       finish_wait(w->wq, &ready);
-
-out:
-       clear_bit(IDLE, &w->flags);
-       if (atomic_dec_and_test(w->set))
-               wake_up_var(w->set);
-
-       return !test_bit(STOP, &w->flags);
-}
-
-static int igt_wakeup_thread(void *arg)
-{
-       struct igt_wakeup *w = arg;
-       struct intel_wait wait;
-
-       while (wait_for_ready(w)) {
-               GEM_BUG_ON(kthread_should_stop());
-
-               intel_wait_init_for_seqno(&wait, w->seqno);
-               intel_engine_add_wait(w->engine, &wait);
-               for (;;) {
-                       set_current_state(TASK_UNINTERRUPTIBLE);
-                       if (i915_seqno_passed(intel_engine_get_seqno(w->engine),
-                                             w->seqno))
-                               break;
-
-                       if (test_bit(STOP, &w->flags)) /* emergency escape */
-                               break;
-
-                       schedule();
-               }
-               intel_engine_remove_wait(w->engine, &wait);
-               __set_current_state(TASK_RUNNING);
-       }
-
-       return 0;
-}
-
-static void igt_wake_all_sync(atomic_t *ready,
-                             atomic_t *set,
-                             atomic_t *done,
-                             wait_queue_head_t *wq,
-                             int count)
-{
-       atomic_set(set, count);
-       atomic_set(ready, 0);
-       wake_up_all(wq);
-
-       wait_var_event(set, !atomic_read(set));
-       atomic_set(ready, count);
-       atomic_set(done, count);
-}
-
-static int igt_wakeup(void *arg)
-{
-       I915_RND_STATE(prng);
-       struct intel_engine_cs *engine = arg;
-       struct igt_wakeup *waiters;
-       DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
-       const int count = 4096;
-       const u32 max_seqno = count / 4;
-       atomic_t ready, set, done;
-       int err = -ENOMEM;
-       int n, step;
-
-       mock_engine_reset(engine);
-
-       waiters = kvmalloc_array(count, sizeof(*waiters), GFP_KERNEL);
-       if (!waiters)
-               goto out_engines;
-
-       /* Create a large number of threads, each waiting on a random seqno.
-        * Multiple waiters will be waiting for the same seqno.
-        */
-       atomic_set(&ready, count);
-       for (n = 0; n < count; n++) {
-               waiters[n].wq = &wq;
-               waiters[n].ready = &ready;
-               waiters[n].set = &set;
-               waiters[n].done = &done;
-               waiters[n].engine = engine;
-               waiters[n].flags = BIT(IDLE);
-
-               waiters[n].tsk = kthread_run(igt_wakeup_thread, &waiters[n],
-                                            "i915/igt:%d", n);
-               if (IS_ERR(waiters[n].tsk))
-                       goto out_waiters;
-
-               get_task_struct(waiters[n].tsk);
-       }
-
-       for (step = 1; step <= max_seqno; step <<= 1) {
-               u32 seqno;
-
-               /* The waiter threads start paused as we assign them a random
-                * seqno and reset the engine. Once the engine is reset,
-                * we signal that the threads may begin their wait upon their
-                * seqno.
-                */
-               for (n = 0; n < count; n++) {
-                       GEM_BUG_ON(!test_bit(IDLE, &waiters[n].flags));
-                       waiters[n].seqno =
-                               1 + prandom_u32_state(&prng) % max_seqno;
-               }
-               mock_seqno_advance(engine, 0);
-               igt_wake_all_sync(&ready, &set, &done, &wq, count);
-
-               /* Simulate the GPU doing chunks of work, with one or more
-                * seqno appearing to finish at the same time. A random number
-                * of threads will be waiting upon the update and hopefully be
-                * woken.
-                */
-               for (seqno = 1; seqno <= max_seqno + step; seqno += step) {
-                       usleep_range(50, 500);
-                       mock_seqno_advance(engine, seqno);
-               }
-               GEM_BUG_ON(intel_engine_get_seqno(engine) < 1 + max_seqno);
-
-               /* With the seqno now beyond any of the waiting threads, they
-                * should all be woken, see that they are complete and signal
-                * that they are ready for the next test. We wait until all
-                * threads are complete and waiting for us (i.e. not a seqno).
-                */
-               if (!wait_var_event_timeout(&done,
-                                           !atomic_read(&done), 10 * HZ)) {
-                       pr_err("Timed out waiting for %d remaining waiters\n",
-                              atomic_read(&done));
-                       err = -ETIMEDOUT;
-                       break;
-               }
-
-               err = check_rbtree_empty(engine);
-               if (err)
-                       break;
-       }
-
-out_waiters:
-       for (n = 0; n < count; n++) {
-               if (IS_ERR(waiters[n].tsk))
-                       break;
-
-               set_bit(STOP, &waiters[n].flags);
-       }
-       mock_seqno_advance(engine, INT_MAX); /* wakeup any broken waiters */
-       igt_wake_all_sync(&ready, &set, &done, &wq, n);
-
-       for (n = 0; n < count; n++) {
-               if (IS_ERR(waiters[n].tsk))
-                       break;
-
-               kthread_stop(waiters[n].tsk);
-               put_task_struct(waiters[n].tsk);
-       }
-
-       kvfree(waiters);
-out_engines:
-       mock_engine_flush(engine);
-       return err;
-}
-
-int intel_breadcrumbs_mock_selftests(void)
-{
-       static const struct i915_subtest tests[] = {
-               SUBTEST(igt_random_insert_remove),
-               SUBTEST(igt_insert_complete),
-               SUBTEST(igt_wakeup),
-       };
-       struct drm_i915_private *i915;
-       int err;
-
-       i915 = mock_gem_device();
-       if (!i915)
-               return -ENOMEM;
-
-       err = i915_subtests(tests, i915->engine[RCS]);
-       drm_dev_put(&i915->drm);
-
-       return err;
-}
index 32cba4cae31afad754a8e66df51bd01bdaff19cf..c5e0a0e98fcb16d499b082357f99a48a27359011 100644 (file)
@@ -137,12 +137,13 @@ static bool client_doorbell_in_sync(struct intel_guc_client *client)
 static int igt_guc_clients(void *args)
 {
        struct drm_i915_private *dev_priv = args;
+       intel_wakeref_t wakeref;
        struct intel_guc *guc;
        int err = 0;
 
        GEM_BUG_ON(!HAS_GUC(dev_priv));
        mutex_lock(&dev_priv->drm.struct_mutex);
-       intel_runtime_pm_get(dev_priv);
+       wakeref = intel_runtime_pm_get(dev_priv);
 
        guc = &dev_priv->guc;
        if (!guc) {
@@ -225,7 +226,7 @@ out:
        guc_clients_create(guc);
        guc_clients_enable(guc);
 unlock:
-       intel_runtime_pm_put(dev_priv);
+       intel_runtime_pm_put(dev_priv, wakeref);
        mutex_unlock(&dev_priv->drm.struct_mutex);
        return err;
 }
@@ -238,13 +239,14 @@ unlock:
 static int igt_guc_doorbells(void *arg)
 {
        struct drm_i915_private *dev_priv = arg;
+       intel_wakeref_t wakeref;
        struct intel_guc *guc;
        int i, err = 0;
        u16 db_id;
 
        GEM_BUG_ON(!HAS_GUC(dev_priv));
        mutex_lock(&dev_priv->drm.struct_mutex);
-       intel_runtime_pm_get(dev_priv);
+       wakeref = intel_runtime_pm_get(dev_priv);
 
        guc = &dev_priv->guc;
        if (!guc) {
@@ -337,7 +339,7 @@ out:
                        guc_client_free(clients[i]);
                }
 unlock:
-       intel_runtime_pm_put(dev_priv);
+       intel_runtime_pm_put(dev_priv, wakeref);
        mutex_unlock(&dev_priv->drm.struct_mutex);
        return err;
 }
index 5910da3e7d7991f871342f733703dac28f01a1a5..7b6f3bea9ef8cec72c2d27de337313b7a7cdbcc8 100644 (file)
@@ -103,52 +103,87 @@ static u64 hws_address(const struct i915_vma *hws,
        return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context);
 }
 
-static int emit_recurse_batch(struct hang *h,
-                             struct i915_request *rq)
+static int move_to_active(struct i915_vma *vma,
+                         struct i915_request *rq,
+                         unsigned int flags)
+{
+       int err;
+
+       err = i915_vma_move_to_active(vma, rq, flags);
+       if (err)
+               return err;
+
+       if (!i915_gem_object_has_active_reference(vma->obj)) {
+               i915_gem_object_get(vma->obj);
+               i915_gem_object_set_active_reference(vma->obj);
+       }
+
+       return 0;
+}
+
+static struct i915_request *
+hang_create_request(struct hang *h, struct intel_engine_cs *engine)
 {
        struct drm_i915_private *i915 = h->i915;
        struct i915_address_space *vm =
-               rq->gem_context->ppgtt ?
-               &rq->gem_context->ppgtt->vm :
-               &i915->ggtt.vm;
+               h->ctx->ppgtt ? &h->ctx->ppgtt->vm : &i915->ggtt.vm;
+       struct i915_request *rq = NULL;
        struct i915_vma *hws, *vma;
        unsigned int flags;
        u32 *batch;
        int err;
 
+       if (i915_gem_object_is_active(h->obj)) {
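+               /*
+                * The batch object is still busy from a previous hang, so
+                * allocate a fresh object for the new batch rather than
+                * rewriting one the GPU may still be reading.
+                */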
+               struct drm_i915_gem_object *obj;
+               void *vaddr;
+
+               obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE);
+               if (IS_ERR(obj))
+                       return ERR_CAST(obj);
+
+               vaddr = i915_gem_object_pin_map(obj,
+                                               i915_coherent_map_type(h->i915));
+               if (IS_ERR(vaddr)) {
+                       i915_gem_object_put(obj);
+                       return ERR_CAST(vaddr);
+               }
+
+               i915_gem_object_unpin_map(h->obj);
+               i915_gem_object_put(h->obj);
+
+               h->obj = obj;
+               h->batch = vaddr;
+       }
+
        vma = i915_vma_instance(h->obj, vm, NULL);
        if (IS_ERR(vma))
-               return PTR_ERR(vma);
+               return ERR_CAST(vma);
 
        hws = i915_vma_instance(h->hws, vm, NULL);
        if (IS_ERR(hws))
-               return PTR_ERR(hws);
+               return ERR_CAST(hws);
 
        err = i915_vma_pin(vma, 0, 0, PIN_USER);
        if (err)
-               return err;
+               return ERR_PTR(err);
 
        err = i915_vma_pin(hws, 0, 0, PIN_USER);
        if (err)
                goto unpin_vma;
 
-       err = i915_vma_move_to_active(vma, rq, 0);
-       if (err)
+       rq = i915_request_alloc(engine, h->ctx);
+       if (IS_ERR(rq)) {
+               err = PTR_ERR(rq);
                goto unpin_hws;
-
-       if (!i915_gem_object_has_active_reference(vma->obj)) {
-               i915_gem_object_get(vma->obj);
-               i915_gem_object_set_active_reference(vma->obj);
        }
 
-       err = i915_vma_move_to_active(hws, rq, 0);
+       err = move_to_active(vma, rq, 0);
        if (err)
-               goto unpin_hws;
+               goto cancel_rq;
 
-       if (!i915_gem_object_has_active_reference(hws->obj)) {
-               i915_gem_object_get(hws->obj);
-               i915_gem_object_set_active_reference(hws->obj);
-       }
+       err = move_to_active(hws, rq, 0);
+       if (err)
+               goto cancel_rq;
 
        batch = h->batch;
        if (INTEL_GEN(i915) >= 8) {
@@ -213,52 +248,16 @@ static int emit_recurse_batch(struct hang *h,
 
        err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
 
+cancel_rq:
+       if (err) {
+               i915_request_skip(rq, err);
+               i915_request_add(rq);
+       }
 unpin_hws:
        i915_vma_unpin(hws);
 unpin_vma:
        i915_vma_unpin(vma);
-       return err;
-}
-
-static struct i915_request *
-hang_create_request(struct hang *h, struct intel_engine_cs *engine)
-{
-       struct i915_request *rq;
-       int err;
-
-       if (i915_gem_object_is_active(h->obj)) {
-               struct drm_i915_gem_object *obj;
-               void *vaddr;
-
-               obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE);
-               if (IS_ERR(obj))
-                       return ERR_CAST(obj);
-
-               vaddr = i915_gem_object_pin_map(obj,
-                                               i915_coherent_map_type(h->i915));
-               if (IS_ERR(vaddr)) {
-                       i915_gem_object_put(obj);
-                       return ERR_CAST(vaddr);
-               }
-
-               i915_gem_object_unpin_map(h->obj);
-               i915_gem_object_put(h->obj);
-
-               h->obj = obj;
-               h->batch = vaddr;
-       }
-
-       rq = i915_request_alloc(engine, h->ctx);
-       if (IS_ERR(rq))
-               return rq;
-
-       err = emit_recurse_batch(h, rq);
-       if (err) {
-               i915_request_add(rq);
-               return ERR_PTR(err);
-       }
-
-       return rq;
+       return err ? ERR_PTR(err) : rq;
 }
 
 static u32 hws_seqno(const struct hang *h, const struct i915_request *rq)
@@ -364,9 +363,7 @@ static int igt_global_reset(void *arg)
        /* Check that we can issue a global GPU reset */
 
        igt_global_reset_lock(i915);
-       set_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags);
 
-       mutex_lock(&i915->drm.struct_mutex);
        reset_count = i915_reset_count(&i915->gpu_error);
 
        i915_reset(i915, ALL_ENGINES, NULL);
@@ -375,9 +372,7 @@ static int igt_global_reset(void *arg)
                pr_err("No GPU reset recorded!\n");
                err = -EINVAL;
        }
-       mutex_unlock(&i915->drm.struct_mutex);
 
-       GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags));
        igt_global_reset_unlock(i915);
 
        if (i915_terminally_wedged(&i915->gpu_error))
@@ -386,6 +381,29 @@ static int igt_global_reset(void *arg)
        return err;
 }
 
+static int igt_wedged_reset(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       intel_wakeref_t wakeref;
+
+       /* Check that we can recover a wedged device with a GPU reset */
+
+       igt_global_reset_lock(i915);
+       wakeref = intel_runtime_pm_get(i915);
+
+       i915_gem_set_wedged(i915);
+
+       mutex_lock(&i915->drm.struct_mutex);
+       GEM_BUG_ON(!i915_terminally_wedged(&i915->gpu_error));
+       i915_reset(i915, ALL_ENGINES, NULL);
+       mutex_unlock(&i915->drm.struct_mutex);
+
+       intel_runtime_pm_put(i915, wakeref);
+       igt_global_reset_unlock(i915);
+
+       return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0;
+}
+
 static bool wait_for_idle(struct intel_engine_cs *engine)
 {
        return wait_for(intel_engine_is_idle(engine), IGT_IDLE_TIMEOUT) == 0;
@@ -431,8 +449,6 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
 
                set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
                do {
-                       u32 seqno = intel_engine_get_seqno(engine);
-
                        if (active) {
                                struct i915_request *rq;
 
@@ -461,8 +477,6 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
                                        break;
                                }
 
-                               GEM_BUG_ON(!rq->global_seqno);
-                               seqno = rq->global_seqno - 1;
                                i915_request_put(rq);
                        }
 
@@ -478,16 +492,15 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
                                break;
                        }
 
-                       reset_engine_count += active;
                        if (i915_reset_engine_count(&i915->gpu_error, engine) !=
-                           reset_engine_count) {
-                               pr_err("%s engine reset %srecorded!\n",
-                                      engine->name, active ? "not " : "");
+                           ++reset_engine_count) {
+                               pr_err("%s engine reset not recorded!\n",
+                                      engine->name);
                                err = -EINVAL;
                                break;
                        }
 
-                       if (!wait_for_idle(engine)) {
+                       if (!i915_reset_flush(i915)) {
                                struct drm_printer p =
                                        drm_info_printer(i915->drm.dev);
 
@@ -710,7 +723,6 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
 
                set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
                do {
-                       u32 seqno = intel_engine_get_seqno(engine);
                        struct i915_request *rq = NULL;
 
                        if (flags & TEST_ACTIVE) {
@@ -738,9 +750,6 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
                                        err = -EIO;
                                        break;
                                }
-
-                               GEM_BUG_ON(!rq->global_seqno);
-                               seqno = rq->global_seqno - 1;
                        }
 
                        err = i915_reset_engine(engine, NULL);
@@ -777,10 +786,9 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
 
                reported = i915_reset_engine_count(&i915->gpu_error, engine);
                reported -= threads[engine->id].resets;
-               if (reported != (flags & TEST_ACTIVE ? count : 0)) {
-                       pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu, expected %lu reported\n",
-                              engine->name, test_name, count, reported,
-                              (flags & TEST_ACTIVE ? count : 0));
+               if (reported != count) {
+                       pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu\n",
+                              engine->name, test_name, count, reported);
                        if (!err)
                                err = -EINVAL;
                }
@@ -879,20 +887,13 @@ static int igt_reset_engines(void *arg)
        return 0;
 }
 
-static u32 fake_hangcheck(struct i915_request *rq, u32 mask)
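+/*
+ * Trigger the reset directly (there is no longer a hangcheck handoff) and
+ * return the reset count from before the reset for the caller to compare.
+ */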
+static u32 fake_hangcheck(struct drm_i915_private *i915, u32 mask)
 {
-       struct i915_gpu_error *error = &rq->i915->gpu_error;
-       u32 reset_count = i915_reset_count(error);
-
-       error->stalled_mask = mask;
-
-       /* set_bit() must be after we have setup the backchannel (mask) */
-       smp_mb__before_atomic();
-       set_bit(I915_RESET_HANDOFF, &error->flags);
+       u32 count = i915_reset_count(&i915->gpu_error);
 
-       wake_up_all(&error->wait_queue);
+       i915_reset(i915, mask, NULL);
 
-       return reset_count;
+       return count;
 }
 
 static int igt_reset_wait(void *arg)
@@ -938,7 +939,7 @@ static int igt_reset_wait(void *arg)
                goto out_rq;
        }
 
-       reset_count = fake_hangcheck(rq, ALL_ENGINES);
+       reset_count = fake_hangcheck(i915, ALL_ENGINES);
 
        timeout = i915_request_wait(rq, I915_WAIT_LOCKED, 10);
        if (timeout < 0) {
@@ -948,7 +949,6 @@ static int igt_reset_wait(void *arg)
                goto out_rq;
        }
 
-       GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags));
        if (i915_reset_count(&i915->gpu_error) == reset_count) {
                pr_err("No GPU reset recorded!\n");
                err = -EINVAL;
@@ -1127,7 +1127,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
 
        wait_for_completion(&arg.completion);
 
-       if (wait_for(waitqueue_active(&rq->execute), 10)) {
+       if (wait_for(!list_empty(&rq->fence.cb_list), 10)) {
                struct drm_printer p = drm_info_printer(i915->drm.dev);
 
                pr_err("igt/evict_vma kthread did not wait\n");
@@ -1138,7 +1138,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
        }
 
 out_reset:
-       fake_hangcheck(rq, intel_engine_flag(rq->engine));
+       fake_hangcheck(rq->i915, intel_engine_flag(rq->engine));
 
        if (tsk) {
                struct igt_wedge_me w;
@@ -1317,12 +1317,7 @@ static int igt_reset_queue(void *arg)
                                goto fini;
                        }
 
-                       reset_count = fake_hangcheck(prev, ENGINE_MASK(id));
-
-                       i915_reset(i915, ENGINE_MASK(id), NULL);
-
-                       GEM_BUG_ON(test_bit(I915_RESET_HANDOFF,
-                                           &i915->gpu_error.flags));
+                       reset_count = fake_hangcheck(i915, ENGINE_MASK(id));
 
                        if (prev->fence.error != -EIO) {
                                pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n",
@@ -1449,10 +1444,203 @@ err_unlock:
        return err;
 }
 
+static void __preempt_begin(void)
+{
+       preempt_disable();
+}
+
+static void __preempt_end(void)
+{
+       preempt_enable();
+}
+
+static void __softirq_begin(void)
+{
+       local_bh_disable();
+}
+
+static void __softirq_end(void)
+{
+       local_bh_enable();
+}
+
+static void __hardirq_begin(void)
+{
+       local_irq_disable();
+}
+
+static void __hardirq_end(void)
+{
+       local_irq_enable();
+}
+
+struct atomic_section {
+       const char *name;
+       void (*critical_section_begin)(void);
+       void (*critical_section_end)(void);
+};
+
+static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
+                                    const struct atomic_section *p,
+                                    const char *mode)
+{
+       struct tasklet_struct * const t = &engine->execlists.tasklet;
+       int err;
+
+       GEM_TRACE("i915_reset_engine(%s:%s) under %s\n",
+                 engine->name, mode, p->name);
+
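+       /*
+        * Disable the submission tasklet and enter the requested atomic
+        * context before resetting the engine, to check that the engine
+        * reset is usable from atomic context.
+        */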
+       tasklet_disable_nosync(t);
+       p->critical_section_begin();
+
+       err = i915_reset_engine(engine, NULL);
+
+       p->critical_section_end();
+       tasklet_enable(t);
+
+       if (err)
+               pr_err("i915_reset_engine(%s:%s) failed under %s\n",
+                      engine->name, mode, p->name);
+
+       return err;
+}
+
+static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
+                                  const struct atomic_section *p)
+{
+       struct drm_i915_private *i915 = engine->i915;
+       struct i915_request *rq;
+       struct hang h;
+       int err;
+
+       err = __igt_atomic_reset_engine(engine, p, "idle");
+       if (err)
+               return err;
+
+       err = hang_init(&h, i915);
+       if (err)
+               return err;
+
+       rq = hang_create_request(&h, engine);
+       if (IS_ERR(rq)) {
+               err = PTR_ERR(rq);
+               goto out;
+       }
+
+       i915_request_get(rq);
+       i915_request_add(rq);
+
+       if (wait_until_running(&h, rq)) {
+               err = __igt_atomic_reset_engine(engine, p, "active");
+       } else {
+               pr_err("%s(%s): Failed to start request %llx, at %x\n",
+                      __func__, engine->name,
+                      rq->fence.seqno, hws_seqno(&h, rq));
+               i915_gem_set_wedged(i915);
+               err = -EIO;
+       }
+
+       if (err == 0) {
+               struct igt_wedge_me w;
+
+               igt_wedge_on_timeout(&w, i915, HZ / 20 /* 50ms timeout */)
+                       i915_request_wait(rq,
+                                         I915_WAIT_LOCKED,
+                                         MAX_SCHEDULE_TIMEOUT);
+               if (i915_terminally_wedged(&i915->gpu_error))
+                       err = -EIO;
+       }
+
+       i915_request_put(rq);
+out:
+       hang_fini(&h);
+       return err;
+}
+
+static void force_reset(struct drm_i915_private *i915)
+{
+       i915_gem_set_wedged(i915);
+       i915_reset(i915, 0, NULL);
+}
+
+static int igt_atomic_reset(void *arg)
+{
+       static const struct atomic_section phases[] = {
+               { "preempt", __preempt_begin, __preempt_end },
+               { "softirq", __softirq_begin, __softirq_end },
+               { "hardirq", __hardirq_begin, __hardirq_end },
+               { }
+       };
+       struct drm_i915_private *i915 = arg;
+       intel_wakeref_t wakeref;
+       int err = 0;
+
+       /* Check that the resets are usable from atomic context */
+
+       if (USES_GUC_SUBMISSION(i915))
+               return 0; /* guc is dead; long live the guc */
+
+       igt_global_reset_lock(i915);
+       mutex_lock(&i915->drm.struct_mutex);
+       wakeref = intel_runtime_pm_get(i915);
+
+       /* Flush any requests before we get started and check basics */
+       force_reset(i915);
+       if (i915_terminally_wedged(&i915->gpu_error))
+               goto unlock;
+
+       if (intel_has_gpu_reset(i915)) {
+               const typeof(*phases) *p;
+
+               for (p = phases; p->name; p++) {
+                       GEM_TRACE("intel_gpu_reset under %s\n", p->name);
+
+                       p->critical_section_begin();
+                       err = intel_gpu_reset(i915, ALL_ENGINES);
+                       p->critical_section_end();
+
+                       if (err) {
+                               pr_err("intel_gpu_reset failed under %s\n",
+                                      p->name);
+                               goto out;
+                       }
+               }
+
+               force_reset(i915);
+       }
+
+       if (intel_has_reset_engine(i915)) {
+               struct intel_engine_cs *engine;
+               enum intel_engine_id id;
+
+               for_each_engine(engine, i915, id) {
+                       const typeof(*phases) *p;
+
+                       for (p = phases; p->name; p++) {
+                               err = igt_atomic_reset_engine(engine, p);
+                               if (err)
+                                       goto out;
+                       }
+               }
+       }
+
+out:
+       /* As we poke around the guts, do a full reset before continuing. */
+       force_reset(i915);
+
+unlock:
+       intel_runtime_pm_put(i915, wakeref);
+       mutex_unlock(&i915->drm.struct_mutex);
+       igt_global_reset_unlock(i915);
+
+       return err;
+}
+
 int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
 {
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_global_reset), /* attempt to recover GPU first */
+               SUBTEST(igt_wedged_reset),
                SUBTEST(igt_hang_sanitycheck),
                SUBTEST(igt_reset_idle_engine),
                SUBTEST(igt_reset_active_engine),
@@ -1463,7 +1651,9 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
                SUBTEST(igt_reset_evict_ppgtt),
                SUBTEST(igt_reset_evict_fence),
                SUBTEST(igt_handle_error),
+               SUBTEST(igt_atomic_reset),
        };
+       intel_wakeref_t wakeref;
        bool saved_hangcheck;
        int err;
 
@@ -1473,8 +1663,9 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
        if (i915_terminally_wedged(&i915->gpu_error))
                return -EIO; /* we're long past hope of a successful reset */
 
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
        saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck);
+       drain_delayed_work(&i915->gpu_error.hangcheck_work); /* flush param */
 
        err = i915_subtests(tests, i915);
 
@@ -1483,7 +1674,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
        mutex_unlock(&i915->drm.struct_mutex);
 
        i915_modparams.enable_hangcheck = saved_hangcheck;
-       intel_runtime_pm_put(i915);
+       intel_runtime_pm_put(i915, wakeref);
 
        return err;
 }
index ca461e3a5f27ffe8bd85b9f0dc4e17078ff5468d..58144e024751fced7c19649b2569f72ab350c310 100644 (file)
@@ -4,6 +4,10 @@
  * Copyright © 2018 Intel Corporation
  */
 
+#include <linux/prime_numbers.h>
+
+#include "../i915_reset.h"
+
 #include "../i915_selftest.h"
 #include "igt_flush_test.h"
 #include "igt_spinner.h"
@@ -18,13 +22,14 @@ static int live_sanitycheck(void *arg)
        struct i915_gem_context *ctx;
        enum intel_engine_id id;
        struct igt_spinner spin;
+       intel_wakeref_t wakeref;
        int err = -ENOMEM;
 
        if (!HAS_LOGICAL_RING_CONTEXTS(i915))
                return 0;
 
        mutex_lock(&i915->drm.struct_mutex);
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
        if (igt_spinner_init(&spin, i915))
                goto err_unlock;
@@ -65,7 +70,7 @@ err_spin:
        igt_spinner_fini(&spin);
 err_unlock:
        igt_flush_test(i915, I915_WAIT_LOCKED);
-       intel_runtime_pm_put(i915);
+       intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
 }
@@ -77,13 +82,14 @@ static int live_preempt(void *arg)
        struct igt_spinner spin_hi, spin_lo;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
+       intel_wakeref_t wakeref;
        int err = -ENOMEM;
 
        if (!HAS_LOGICAL_RING_PREEMPTION(i915))
                return 0;
 
        mutex_lock(&i915->drm.struct_mutex);
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
        if (igt_spinner_init(&spin_hi, i915))
                goto err_unlock;
@@ -158,7 +164,7 @@ err_spin_hi:
        igt_spinner_fini(&spin_hi);
 err_unlock:
        igt_flush_test(i915, I915_WAIT_LOCKED);
-       intel_runtime_pm_put(i915);
+       intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
 }
@@ -171,13 +177,14 @@ static int live_late_preempt(void *arg)
        struct intel_engine_cs *engine;
        struct i915_sched_attr attr = {};
        enum intel_engine_id id;
+       intel_wakeref_t wakeref;
        int err = -ENOMEM;
 
        if (!HAS_LOGICAL_RING_PREEMPTION(i915))
                return 0;
 
        mutex_lock(&i915->drm.struct_mutex);
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
        if (igt_spinner_init(&spin_hi, i915))
                goto err_unlock;
@@ -251,7 +258,7 @@ err_spin_hi:
        igt_spinner_fini(&spin_hi);
 err_unlock:
        igt_flush_test(i915, I915_WAIT_LOCKED);
-       intel_runtime_pm_put(i915);
+       intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
 
@@ -263,6 +270,243 @@ err_wedged:
        goto err_ctx_lo;
 }
 
+struct preempt_client {
+       struct igt_spinner spin;
+       struct i915_gem_context *ctx;
+};
+
+static int preempt_client_init(struct drm_i915_private *i915,
+                              struct preempt_client *c)
+{
+       c->ctx = kernel_context(i915);
+       if (!c->ctx)
+               return -ENOMEM;
+
+       if (igt_spinner_init(&c->spin, i915))
+               goto err_ctx;
+
+       return 0;
+
+err_ctx:
+       kernel_context_close(c->ctx);
+       return -ENOMEM;
+}
+
+static void preempt_client_fini(struct preempt_client *c)
+{
+       igt_spinner_fini(&c->spin);
+       kernel_context_close(c->ctx);
+}
+
+static int live_suppress_self_preempt(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       struct intel_engine_cs *engine;
+       struct i915_sched_attr attr = {
+               .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
+       };
+       struct preempt_client a, b;
+       enum intel_engine_id id;
+       intel_wakeref_t wakeref;
+       int err = -ENOMEM;
+
+       /*
+        * Verify that if a preemption request does not cause a change in
+        * the current execution order, the preempt-to-idle injection is
+        * skipped and that we do not accidentally apply it after the CS
+        * completion event.
+        */
+
+       if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+               return 0;
+
+       if (USES_GUC_SUBMISSION(i915))
+               return 0; /* presume black box */
+
+       mutex_lock(&i915->drm.struct_mutex);
+       wakeref = intel_runtime_pm_get(i915);
+
+       if (preempt_client_init(i915, &a))
+               goto err_unlock;
+       if (preempt_client_init(i915, &b))
+               goto err_client_a;
+
+       for_each_engine(engine, i915, id) {
+               struct i915_request *rq_a, *rq_b;
+               int depth;
+
+               engine->execlists.preempt_hang.count = 0;
+
+               rq_a = igt_spinner_create_request(&a.spin,
+                                                 a.ctx, engine,
+                                                 MI_NOOP);
+               if (IS_ERR(rq_a)) {
+                       err = PTR_ERR(rq_a);
+                       goto err_client_b;
+               }
+
+               i915_request_add(rq_a);
+               if (!igt_wait_for_spinner(&a.spin, rq_a)) {
+                       pr_err("First client failed to start\n");
+                       goto err_wedged;
+               }
+
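+               /*
+                * Repeatedly boost the priority of the request that is
+                * already executing; as the execution order does not change,
+                * the preempt-to-idle injection must be suppressed and not
+                * counted in preempt_hang.count.
+                */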
+               for (depth = 0; depth < 8; depth++) {
+                       rq_b = igt_spinner_create_request(&b.spin,
+                                                         b.ctx, engine,
+                                                         MI_NOOP);
+                       if (IS_ERR(rq_b)) {
+                               err = PTR_ERR(rq_b);
+                               goto err_client_b;
+                       }
+                       i915_request_add(rq_b);
+
+                       GEM_BUG_ON(i915_request_completed(rq_a));
+                       engine->schedule(rq_a, &attr);
+                       igt_spinner_end(&a.spin);
+
+                       if (!igt_wait_for_spinner(&b.spin, rq_b)) {
+                               pr_err("Second client failed to start\n");
+                               goto err_wedged;
+                       }
+
+                       swap(a, b);
+                       rq_a = rq_b;
+               }
+               igt_spinner_end(&a.spin);
+
+               if (engine->execlists.preempt_hang.count) {
+                       pr_err("Preemption recorded x%d, depth %d; should have been suppressed!\n",
+                              engine->execlists.preempt_hang.count,
+                              depth);
+                       err = -EINVAL;
+                       goto err_client_b;
+               }
+
+               if (igt_flush_test(i915, I915_WAIT_LOCKED))
+                       goto err_wedged;
+       }
+
+       err = 0;
+err_client_b:
+       preempt_client_fini(&b);
+err_client_a:
+       preempt_client_fini(&a);
+err_unlock:
+       if (igt_flush_test(i915, I915_WAIT_LOCKED))
+               err = -EIO;
+       intel_runtime_pm_put(i915, wakeref);
+       mutex_unlock(&i915->drm.struct_mutex);
+       return err;
+
+err_wedged:
+       igt_spinner_end(&b.spin);
+       igt_spinner_end(&a.spin);
+       i915_gem_set_wedged(i915);
+       err = -EIO;
+       goto err_client_b;
+}
+
+static int live_chain_preempt(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       struct intel_engine_cs *engine;
+       struct preempt_client hi, lo;
+       enum intel_engine_id id;
+       intel_wakeref_t wakeref;
+       int err = -ENOMEM;
+
+       /*
+        * Build a chain AB...BA between two contexts (A, B) and request
+        * preemption of the last request. It should then complete before
+        * the previously submitted spinner in B.
+        */
+
+       if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+               return 0;
+
+       mutex_lock(&i915->drm.struct_mutex);
+       wakeref = intel_runtime_pm_get(i915);
+
+       if (preempt_client_init(i915, &hi))
+               goto err_unlock;
+
+       if (preempt_client_init(i915, &lo))
+               goto err_client_hi;
+
+       for_each_engine(engine, i915, id) {
+               struct i915_sched_attr attr = {
+                       .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
+               };
+               int count, i;
+
+               for_each_prime_number_from(count, 1, 32) { /* must fit ring! */
+                       struct i915_request *rq;
+
+                       rq = igt_spinner_create_request(&hi.spin,
+                                                       hi.ctx, engine,
+                                                       MI_ARB_CHECK);
+                       if (IS_ERR(rq))
+                               goto err_wedged;
+                       i915_request_add(rq);
+                       if (!igt_wait_for_spinner(&hi.spin, rq))
+                               goto err_wedged;
+
+                       rq = igt_spinner_create_request(&lo.spin,
+                                                       lo.ctx, engine,
+                                                       MI_ARB_CHECK);
+                       if (IS_ERR(rq))
+                               goto err_wedged;
+                       i915_request_add(rq);
+
+                       for (i = 0; i < count; i++) {
+                               rq = i915_request_alloc(engine, lo.ctx);
+                               if (IS_ERR(rq))
+                                       goto err_wedged;
+                               i915_request_add(rq);
+                       }
+
+                       rq = i915_request_alloc(engine, hi.ctx);
+                       if (IS_ERR(rq))
+                               goto err_wedged;
+                       i915_request_add(rq);
+                       engine->schedule(rq, &attr);
+
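+                       /*
+                        * End the hi spinner and wait for the boosted
+                        * request: it should be executed ahead of the chain
+                        * of lo requests instead of queuing behind lo's
+                        * still-spinning batch.
+                        */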
+                       igt_spinner_end(&hi.spin);
+                       if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+                               struct drm_printer p =
+                                       drm_info_printer(i915->drm.dev);
+
+                               pr_err("Failed to preempt over chain of %d\n",
+                                      count);
+                               intel_engine_dump(engine, &p,
+                                                 "%s\n", engine->name);
+                               goto err_wedged;
+                       }
+                       igt_spinner_end(&lo.spin);
+               }
+       }
+
+       err = 0;
+err_client_lo:
+       preempt_client_fini(&lo);
+err_client_hi:
+       preempt_client_fini(&hi);
+err_unlock:
+       if (igt_flush_test(i915, I915_WAIT_LOCKED))
+               err = -EIO;
+       intel_runtime_pm_put(i915, wakeref);
+       mutex_unlock(&i915->drm.struct_mutex);
+       return err;
+
+err_wedged:
+       igt_spinner_end(&hi.spin);
+       igt_spinner_end(&lo.spin);
+       i915_gem_set_wedged(i915);
+       err = -EIO;
+       goto err_client_lo;
+}
+
 static int live_preempt_hang(void *arg)
 {
        struct drm_i915_private *i915 = arg;
@@ -270,6 +514,7 @@ static int live_preempt_hang(void *arg)
        struct igt_spinner spin_hi, spin_lo;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
+       intel_wakeref_t wakeref;
        int err = -ENOMEM;
 
        if (!HAS_LOGICAL_RING_PREEMPTION(i915))
@@ -279,7 +524,7 @@ static int live_preempt_hang(void *arg)
                return 0;
 
        mutex_lock(&i915->drm.struct_mutex);
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
        if (igt_spinner_init(&spin_hi, i915))
                goto err_unlock;
@@ -374,7 +619,7 @@ err_spin_hi:
        igt_spinner_fini(&spin_hi);
 err_unlock:
        igt_flush_test(i915, I915_WAIT_LOCKED);
-       intel_runtime_pm_put(i915);
+       intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
 }
@@ -522,7 +767,7 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
 
        pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
                count, flags,
-               INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
+               RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext);
        return 0;
 }
 
@@ -550,7 +795,7 @@ static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
 
        pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
                count, flags,
-               INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
+               RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext);
        return 0;
 }
 
@@ -562,6 +807,7 @@ static int live_preempt_smoke(void *arg)
                .ncontext = 1024,
        };
        const unsigned int phase[] = { 0, BATCH };
+       intel_wakeref_t wakeref;
        int err = -ENOMEM;
        u32 *cs;
        int n;
@@ -576,7 +822,7 @@ static int live_preempt_smoke(void *arg)
                return -ENOMEM;
 
        mutex_lock(&smoke.i915->drm.struct_mutex);
-       intel_runtime_pm_get(smoke.i915);
+       wakeref = intel_runtime_pm_get(smoke.i915);
 
        smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
        if (IS_ERR(smoke.batch)) {
@@ -627,7 +873,7 @@ err_ctx:
 err_batch:
        i915_gem_object_put(smoke.batch);
 err_unlock:
-       intel_runtime_pm_put(smoke.i915);
+       intel_runtime_pm_put(smoke.i915, wakeref);
        mutex_unlock(&smoke.i915->drm.struct_mutex);
        kfree(smoke.contexts);
 
@@ -640,6 +886,8 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
                SUBTEST(live_sanitycheck),
                SUBTEST(live_preempt),
                SUBTEST(live_late_preempt),
+               SUBTEST(live_suppress_self_preempt),
+               SUBTEST(live_chain_preempt),
                SUBTEST(live_preempt_hang),
                SUBTEST(live_preempt_smoke),
        };
index 67017d5175b8c6cfcb8c5df8b2a90314f517b621..b15c4f26c5933c32a086d8b4441ccbac1846d424 100644 (file)
@@ -5,6 +5,7 @@
  */
 
 #include "../i915_selftest.h"
+#include "../i915_reset.h"
 
 #include "igt_flush_test.h"
 #include "igt_reset.h"
 #include "igt_wedge_me.h"
 #include "mock_context.h"
 
+#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 4)
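+
+/*
+ * Reference copies of the GT and per-engine workaround lists, captured once
+ * and later verified against the live state across resets.
+ */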
+struct wa_lists {
+       struct i915_wa_list gt_wa_list;
+       struct {
+               char name[REF_NAME_MAX];
+               struct i915_wa_list wa_list;
+       } engine[I915_NUM_ENGINES];
+};
+
+static void
+reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
+{
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+
+       memset(lists, 0, sizeof(*lists));
+
+       wa_init_start(&lists->gt_wa_list, "GT_REF");
+       gt_init_workarounds(i915, &lists->gt_wa_list);
+       wa_init_finish(&lists->gt_wa_list);
+
+       for_each_engine(engine, i915, id) {
+               struct i915_wa_list *wal = &lists->engine[id].wa_list;
+               char *name = lists->engine[id].name;
+
+               snprintf(name, REF_NAME_MAX, "%s_REF", engine->name);
+
+               wa_init_start(wal, name);
+               engine_init_workarounds(engine, wal);
+               wa_init_finish(wal);
+       }
+}
+
+static void
+reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists)
+{
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+
+       for_each_engine(engine, i915, id)
+               intel_wa_list_free(&lists->engine[id].wa_list);
+
+       intel_wa_list_free(&lists->gt_wa_list);
+}
+
 static struct drm_i915_gem_object *
 read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 {
+       const u32 base = engine->mmio_base;
        struct drm_i915_gem_object *result;
+       intel_wakeref_t wakeref;
        struct i915_request *rq;
        struct i915_vma *vma;
-       const u32 base = engine->mmio_base;
        u32 srm, *cs;
        int err;
        int i;
@@ -47,9 +94,9 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
        if (err)
                goto err_obj;
 
-       intel_runtime_pm_get(engine->i915);
-       rq = i915_request_alloc(engine, ctx);
-       intel_runtime_pm_put(engine->i915);
+       rq = ERR_PTR(-ENODEV);
+       with_intel_runtime_pm(engine->i915, wakeref)
+               rq = i915_request_alloc(engine, ctx);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto err_pin;
@@ -167,7 +214,6 @@ out_put:
 
 static int do_device_reset(struct intel_engine_cs *engine)
 {
-       set_bit(I915_RESET_HANDOFF, &engine->i915->gpu_error.flags);
        i915_reset(engine->i915, ENGINE_MASK(engine->id), "live_workarounds");
        return 0;
 }
@@ -183,20 +229,22 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
 {
        struct i915_gem_context *ctx;
        struct i915_request *rq;
+       intel_wakeref_t wakeref;
        int err = 0;
 
        ctx = kernel_context(engine->i915);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
 
-       intel_runtime_pm_get(engine->i915);
-
-       if (spin)
-               rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
-       else
-               rq = i915_request_alloc(engine, ctx);
-
-       intel_runtime_pm_put(engine->i915);
+       rq = ERR_PTR(-ENODEV);
+       with_intel_runtime_pm(engine->i915, wakeref) {
+               if (spin)
+                       rq = igt_spinner_create_request(spin,
+                                                       ctx, engine,
+                                                       MI_NOOP);
+               else
+                       rq = i915_request_alloc(engine, ctx);
+       }
 
        kernel_context_close(ctx);
 
@@ -228,6 +276,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
        bool want_spin = reset == do_engine_reset;
        struct i915_gem_context *ctx;
        struct igt_spinner spin;
+       intel_wakeref_t wakeref;
        int err;
 
        pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n",
@@ -253,9 +302,8 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
        if (err)
                goto out;
 
-       intel_runtime_pm_get(i915);
-       err = reset(engine);
-       intel_runtime_pm_put(i915);
+       with_intel_runtime_pm(i915, wakeref)
+               err = reset(engine);
 
        if (want_spin) {
                igt_spinner_end(&spin);
@@ -326,16 +374,17 @@ out:
        return err;
 }
 
-static bool verify_gt_engine_wa(struct drm_i915_private *i915, const char *str)
+static bool verify_gt_engine_wa(struct drm_i915_private *i915,
+                               struct wa_lists *lists, const char *str)
 {
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        bool ok = true;
 
-       ok &= intel_gt_verify_workarounds(i915, str);
+       ok &= wa_list_verify(i915, &lists->gt_wa_list, str);
 
        for_each_engine(engine, i915, id)
-               ok &= intel_engine_verify_workarounds(engine, str);
+               ok &= wa_list_verify(i915, &lists->engine[id].wa_list, str);
 
        return ok;
 }
@@ -344,7 +393,8 @@ static int
 live_gpu_reset_gt_engine_workarounds(void *arg)
 {
        struct drm_i915_private *i915 = arg;
-       struct i915_gpu_error *error = &i915->gpu_error;
+       intel_wakeref_t wakeref;
+       struct wa_lists lists;
        bool ok;
 
        if (!intel_has_gpu_reset(i915))
@@ -353,19 +403,21 @@ live_gpu_reset_gt_engine_workarounds(void *arg)
        pr_info("Verifying after GPU reset...\n");
 
        igt_global_reset_lock(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
-       ok = verify_gt_engine_wa(i915, "before reset");
+       reference_lists_init(i915, &lists);
+
+       ok = verify_gt_engine_wa(i915, &lists, "before reset");
        if (!ok)
                goto out;
 
-       intel_runtime_pm_get(i915);
-       set_bit(I915_RESET_HANDOFF, &error->flags);
        i915_reset(i915, ALL_ENGINES, "live_workarounds");
-       intel_runtime_pm_put(i915);
 
-       ok = verify_gt_engine_wa(i915, "after reset");
+       ok = verify_gt_engine_wa(i915, &lists, "after reset");
 
 out:
+       reference_lists_fini(i915, &lists);
+       intel_runtime_pm_put(i915, wakeref);
        igt_global_reset_unlock(i915);
 
        return ok ? 0 : -ESRCH;
@@ -380,6 +432,8 @@ live_engine_reset_gt_engine_workarounds(void *arg)
        struct igt_spinner spin;
        enum intel_engine_id id;
        struct i915_request *rq;
+       intel_wakeref_t wakeref;
+       struct wa_lists lists;
        int ret = 0;
 
        if (!intel_has_reset_engine(i915))
@@ -390,23 +444,24 @@ live_engine_reset_gt_engine_workarounds(void *arg)
                return PTR_ERR(ctx);
 
        igt_global_reset_lock(i915);
+       wakeref = intel_runtime_pm_get(i915);
+
+       reference_lists_init(i915, &lists);
 
        for_each_engine(engine, i915, id) {
                bool ok;
 
                pr_info("Verifying after %s reset...\n", engine->name);
 
-               ok = verify_gt_engine_wa(i915, "before reset");
+               ok = verify_gt_engine_wa(i915, &lists, "before reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                }
 
-               intel_runtime_pm_get(i915);
                i915_reset_engine(engine, "live_workarounds");
-               intel_runtime_pm_put(i915);
 
-               ok = verify_gt_engine_wa(i915, "after idle reset");
+               ok = verify_gt_engine_wa(i915, &lists, "after idle reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
@@ -416,13 +471,10 @@ live_engine_reset_gt_engine_workarounds(void *arg)
                if (ret)
                        goto err;
 
-               intel_runtime_pm_get(i915);
-
                rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
                if (IS_ERR(rq)) {
                        ret = PTR_ERR(rq);
                        igt_spinner_fini(&spin);
-                       intel_runtime_pm_put(i915);
                        goto err;
                }
 
@@ -431,19 +483,16 @@ live_engine_reset_gt_engine_workarounds(void *arg)
                if (!igt_wait_for_spinner(&spin, rq)) {
                        pr_err("Spinner failed to start\n");
                        igt_spinner_fini(&spin);
-                       intel_runtime_pm_put(i915);
                        ret = -ETIMEDOUT;
                        goto err;
                }
 
                i915_reset_engine(engine, "live_workarounds");
 
-               intel_runtime_pm_put(i915);
-
                igt_spinner_end(&spin);
                igt_spinner_fini(&spin);
 
-               ok = verify_gt_engine_wa(i915, "after busy reset");
+               ok = verify_gt_engine_wa(i915, &lists, "after busy reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
@@ -451,6 +500,8 @@ live_engine_reset_gt_engine_workarounds(void *arg)
        }
 
 err:
+       reference_lists_fini(i915, &lists);
+       intel_runtime_pm_put(i915, wakeref);
        igt_global_reset_unlock(i915);
        kernel_context_close(ctx);
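
The with_intel_runtime_pm() blocks above scope the acquire/release pair around a statement so the wakeref cannot leak on an early exit. Assuming the usual single-iteration for-loop construction for such scoped-resource macros (a sketch, not necessarily the verbatim i915 definition):

    #define with_intel_runtime_pm(i915, wf) \
            for ((wf) = intel_runtime_pm_get(i915); (wf); \
                 intel_runtime_pm_put((i915), (wf)), (wf) = 0)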
 
index b26f07b55d861c04b45dbad8d56c4ea9cd414540..2bfa72c1654b008003e1fe988563ca6bdb176065 100644 (file)
@@ -76,3 +76,57 @@ void timed_fence_fini(struct timed_fence *tf)
        destroy_timer_on_stack(&tf->timer);
        i915_sw_fence_fini(&tf->fence);
 }
+
+struct heap_fence {
+       struct i915_sw_fence fence;
+       union {
+               struct kref ref;
+               struct rcu_head rcu;
+       };
+};
+
+static int __i915_sw_fence_call
+heap_fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+       struct heap_fence *h = container_of(fence, typeof(*h), fence);
+
+       switch (state) {
+       case FENCE_COMPLETE:
+               break;
+
+       case FENCE_FREE:
+               heap_fence_put(&h->fence);
+       }
+
+       return NOTIFY_DONE;
+}
+
+struct i915_sw_fence *heap_fence_create(gfp_t gfp)
+{
+       struct heap_fence *h;
+
+       h = kmalloc(sizeof(*h), gfp);
+       if (!h)
+               return NULL;
+
+       i915_sw_fence_init(&h->fence, heap_fence_notify);
+       refcount_set(&h->ref.refcount, 2);
+
+       return &h->fence;
+}
+
+static void heap_fence_release(struct kref *ref)
+{
+       struct heap_fence *h = container_of(ref, typeof(*h), ref);
+
+       i915_sw_fence_fini(&h->fence);
+
+       kfree_rcu(h, rcu);
+}
+
+void heap_fence_put(struct i915_sw_fence *fence)
+{
+       struct heap_fence *h = container_of(fence, typeof(*h), fence);
+
+       kref_put(&h->ref, heap_fence_release);
+}
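
heap_fence_create() above starts the refcount at 2 deliberately: one reference belongs to the caller and is dropped via heap_fence_put(), the other is dropped by the FENCE_FREE notification, so the allocation outlives both the user and the fence core. A hedged usage sketch (i915_sw_fence_commit() assumed as the signalling entry point):

    struct i915_sw_fence *fence;

    fence = heap_fence_create(GFP_KERNEL);
    if (!fence)
            return -ENOMEM;

    /* ... chain other fences onto it ... */
    i915_sw_fence_commit(fence);  /* allow it to signal */

    heap_fence_put(fence);        /* drop the caller's reference */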
index 474aafb92ae1afa5d7c5408afdc950ca55065473..1f9927e10f3a47f505f7ae3299ff86084b2f4e49 100644 (file)
@@ -39,4 +39,7 @@ struct timed_fence {
 void timed_fence_init(struct timed_fence *tf, unsigned long expires);
 void timed_fence_fini(struct timed_fence *tf);
 
+struct i915_sw_fence *heap_fence_create(gfp_t gfp);
+void heap_fence_put(struct i915_sw_fence *fence);
+
 #endif /* _LIB_SW_FENCE_H_ */
index d937bdff26f99cec944a4550f02fddd3ad482772..b646cdcdd6029a95bace9ed5f537abcf28464abb 100644 (file)
@@ -45,11 +45,8 @@ mock_context(struct drm_i915_private *i915,
        INIT_LIST_HEAD(&ctx->handles_list);
        INIT_LIST_HEAD(&ctx->hw_id_link);
 
-       for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
-               struct intel_context *ce = &ctx->__engine[n];
-
-               ce->gem_context = ctx;
-       }
+       for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++)
+               intel_context_init(&ctx->__engine[n], ctx, i915->engine[n]);
 
        ret = i915_gem_context_pin_hw_id(ctx);
        if (ret < 0)
index d0c44c18db429cd064c55dd51439ef01087c5779..08f0cab02e0f331d9ecfbb03e25cc5f7b241852d 100644 (file)
@@ -30,6 +30,52 @@ struct mock_ring {
        struct i915_timeline timeline;
 };
 
+static void mock_timeline_pin(struct i915_timeline *tl)
+{
+       tl->pin_count++;
+}
+
+static void mock_timeline_unpin(struct i915_timeline *tl)
+{
+       GEM_BUG_ON(!tl->pin_count);
+       tl->pin_count--;
+}
+
+static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
+{
+       const unsigned long sz = PAGE_SIZE / 2;
+       struct mock_ring *ring;
+
+       ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
+       if (!ring)
+               return NULL;
+
+       if (i915_timeline_init(engine->i915,
+                              &ring->timeline, engine->name,
+                              NULL)) {
+               kfree(ring);
+               return NULL;
+       }
+
+       ring->base.size = sz;
+       ring->base.effective_size = sz;
+       ring->base.vaddr = (void *)(ring + 1);
+       ring->base.timeline = &ring->timeline;
+
+       INIT_LIST_HEAD(&ring->base.request_list);
+       intel_ring_update_space(&ring->base);
+
+       return &ring->base;
+}
+
+static void mock_ring_free(struct intel_ring *base)
+{
+       struct mock_ring *ring = container_of(base, typeof(*ring), base);
+
+       i915_timeline_fini(&ring->timeline);
+       kfree(ring);
+}
+
 static struct mock_request *first_request(struct mock_engine *engine)
 {
        return list_first_entry_or_null(&engine->hw_queue,
@@ -37,24 +83,29 @@ static struct mock_request *first_request(struct mock_engine *engine)
                                        link);
 }
 
-static void advance(struct mock_engine *engine,
-                   struct mock_request *request)
+static void advance(struct mock_request *request)
 {
        list_del_init(&request->link);
-       mock_seqno_advance(&engine->base, request->base.global_seqno);
+       intel_engine_write_global_seqno(request->base.engine,
+                                       request->base.global_seqno);
+       i915_request_mark_complete(&request->base);
+       GEM_BUG_ON(!i915_request_completed(&request->base));
+
+       intel_engine_queue_breadcrumbs(request->base.engine);
 }
 
 static void hw_delay_complete(struct timer_list *t)
 {
        struct mock_engine *engine = from_timer(engine, t, hw_delay);
        struct mock_request *request;
+       unsigned long flags;
 
-       spin_lock(&engine->hw_lock);
+       spin_lock_irqsave(&engine->hw_lock, flags);
 
        /* Timer fired, first request is complete */
        request = first_request(engine);
        if (request)
-               advance(engine, request);
+               advance(request);
 
        /*
         * Also immediately signal any subsequent 0-delay requests, but
@@ -66,20 +117,24 @@ static void hw_delay_complete(struct timer_list *t)
                        break;
                }
 
-               advance(engine, request);
+               advance(request);
        }
 
-       spin_unlock(&engine->hw_lock);
+       spin_unlock_irqrestore(&engine->hw_lock, flags);
 }
 
 static void mock_context_unpin(struct intel_context *ce)
 {
+       mock_timeline_unpin(ce->ring->timeline);
        i915_gem_context_put(ce->gem_context);
 }
 
 static void mock_context_destroy(struct intel_context *ce)
 {
        GEM_BUG_ON(ce->pin_count);
+
+       if (ce->ring)
+               mock_ring_free(ce->ring);
 }
 
 static const struct intel_context_ops mock_context_ops = {
@@ -92,14 +147,26 @@ mock_context_pin(struct intel_engine_cs *engine,
                 struct i915_gem_context *ctx)
 {
        struct intel_context *ce = to_intel_context(ctx, engine);
+       int err = -ENOMEM;
 
-       if (!ce->pin_count++) {
-               i915_gem_context_get(ctx);
-               ce->ring = engine->buffer;
-               ce->ops = &mock_context_ops;
+       if (ce->pin_count++)
+               return ce;
+
+       if (!ce->ring) {
+               ce->ring = mock_ring(engine);
+               if (!ce->ring)
+                       goto err;
        }
 
+       mock_timeline_pin(ce->ring->timeline);
+
+       ce->ops = &mock_context_ops;
+       i915_gem_context_get(ctx);
        return ce;
+
+err:
+       ce->pin_count = 0;
+       return ERR_PTR(err);
 }
 
 static int mock_request_alloc(struct i915_request *request)
@@ -118,9 +185,9 @@ static int mock_emit_flush(struct i915_request *request,
        return 0;
 }
 
-static void mock_emit_breadcrumb(struct i915_request *request,
-                                u32 *flags)
+static u32 *mock_emit_breadcrumb(struct i915_request *request, u32 *cs)
 {
+       return cs;
 }
 
 static void mock_submit_request(struct i915_request *request)
@@ -128,51 +195,20 @@ static void mock_submit_request(struct i915_request *request)
        struct mock_request *mock = container_of(request, typeof(*mock), base);
        struct mock_engine *engine =
                container_of(request->engine, typeof(*engine), base);
+       unsigned long flags;
 
        i915_request_submit(request);
        GEM_BUG_ON(!request->global_seqno);
 
-       spin_lock_irq(&engine->hw_lock);
+       spin_lock_irqsave(&engine->hw_lock, flags);
        list_add_tail(&mock->link, &engine->hw_queue);
        if (mock->link.prev == &engine->hw_queue) {
                if (mock->delay)
                        mod_timer(&engine->hw_delay, jiffies + mock->delay);
                else
-                       advance(engine, mock);
+                       advance(mock);
        }
-       spin_unlock_irq(&engine->hw_lock);
-}
-
-static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
-{
-       const unsigned long sz = PAGE_SIZE / 2;
-       struct mock_ring *ring;
-
-       BUILD_BUG_ON(MIN_SPACE_FOR_ADD_REQUEST > sz);
-
-       ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
-       if (!ring)
-               return NULL;
-
-       i915_timeline_init(engine->i915, &ring->timeline, engine->name);
-
-       ring->base.size = sz;
-       ring->base.effective_size = sz;
-       ring->base.vaddr = (void *)(ring + 1);
-       ring->base.timeline = &ring->timeline;
-
-       INIT_LIST_HEAD(&ring->base.request_list);
-       intel_ring_update_space(&ring->base);
-
-       return &ring->base;
-}
-
-static void mock_ring_free(struct intel_ring *base)
-{
-       struct mock_ring *ring = container_of(base, typeof(*ring), base);
-
-       i915_timeline_fini(&ring->timeline);
-       kfree(ring);
+       spin_unlock_irqrestore(&engine->hw_lock, flags);
 }
 
 struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
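
Both hw_lock sites in the mock engine move from the plain/_irq spinlock variants to _irqsave/_irqrestore, the standard pattern when a lock may be taken from contexts whose interrupt state is unknown (here, the timer callback as well as request submission):

    unsigned long flags;

    spin_lock_irqsave(&engine->hw_lock, flags);      /* save current irq state */
    /* ... manipulate engine->hw_queue ... */
    spin_unlock_irqrestore(&engine->hw_lock, flags); /* restore it exactly */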
@@ -191,39 +227,37 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
        engine->base.i915 = i915;
        snprintf(engine->base.name, sizeof(engine->base.name), "%s", name);
        engine->base.id = id;
-       engine->base.status_page.page_addr = (void *)(engine + 1);
+       engine->base.status_page.addr = (void *)(engine + 1);
 
        engine->base.context_pin = mock_context_pin;
        engine->base.request_alloc = mock_request_alloc;
        engine->base.emit_flush = mock_emit_flush;
-       engine->base.emit_breadcrumb = mock_emit_breadcrumb;
+       engine->base.emit_fini_breadcrumb = mock_emit_breadcrumb;
        engine->base.submit_request = mock_submit_request;
 
-       i915_timeline_init(i915, &engine->base.timeline, engine->base.name);
+       if (i915_timeline_init(i915,
+                              &engine->base.timeline,
+                              engine->base.name,
+                              NULL))
+               goto err_free;
        i915_timeline_set_subclass(&engine->base.timeline, TIMELINE_ENGINE);
 
        intel_engine_init_breadcrumbs(&engine->base);
-       engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */
 
        /* fake hw queue */
        spin_lock_init(&engine->hw_lock);
        timer_setup(&engine->hw_delay, hw_delay_complete, 0);
        INIT_LIST_HEAD(&engine->hw_queue);
 
-       engine->base.buffer = mock_ring(&engine->base);
-       if (!engine->base.buffer)
-               goto err_breadcrumbs;
-
        if (IS_ERR(intel_context_pin(i915->kernel_context, &engine->base)))
-               goto err_ring;
+               goto err_breadcrumbs;
 
        return &engine->base;
 
-err_ring:
-       mock_ring_free(engine->base.buffer);
 err_breadcrumbs:
        intel_engine_fini_breadcrumbs(&engine->base);
        i915_timeline_fini(&engine->base.timeline);
+err_free:
        kfree(engine);
        return NULL;
 }
@@ -237,16 +271,14 @@ void mock_engine_flush(struct intel_engine_cs *engine)
        del_timer_sync(&mock->hw_delay);
 
        spin_lock_irq(&mock->hw_lock);
-       list_for_each_entry_safe(request, rn, &mock->hw_queue, link) {
-               list_del_init(&request->link);
-               mock_seqno_advance(&mock->base, request->base.global_seqno);
-       }
+       list_for_each_entry_safe(request, rn, &mock->hw_queue, link)
+               advance(request);
        spin_unlock_irq(&mock->hw_lock);
 }
 
 void mock_engine_reset(struct intel_engine_cs *engine)
 {
-       intel_write_status_page(engine, I915_GEM_HWS_INDEX, 0);
+       intel_engine_write_global_seqno(engine, 0);
 }
 
 void mock_engine_free(struct intel_engine_cs *engine)
@@ -263,8 +295,6 @@ void mock_engine_free(struct intel_engine_cs *engine)
 
        __intel_context_unpin(engine->i915->kernel_context, engine);
 
-       mock_ring_free(engine->buffer);
-
        intel_engine_fini_breadcrumbs(engine);
        i915_timeline_fini(&engine->timeline);
 
index 133d0c21790ddb3b095eb21571aec13249299a9e..b9cc3a245f1684264074372de1340bf03c7371b1 100644 (file)
@@ -46,10 +46,4 @@ void mock_engine_flush(struct intel_engine_cs *engine);
 void mock_engine_reset(struct intel_engine_cs *engine);
 void mock_engine_free(struct intel_engine_cs *engine);
 
-static inline void mock_seqno_advance(struct intel_engine_cs *engine, u32 seqno)
-{
-       intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
-       intel_engine_wakeup(engine);
-}
-
 #endif /* !__MOCK_ENGINE_H__ */
index 43ed8b28aeaa0ce86c6162814b5b9f30ce981b70..14ae46fda49f1c816fa2d5bb3e88a847eddcd57d 100644 (file)
@@ -58,8 +58,8 @@ static void mock_device_release(struct drm_device *dev)
        i915_gem_contexts_lost(i915);
        mutex_unlock(&i915->drm.struct_mutex);
 
-       cancel_delayed_work_sync(&i915->gt.retire_work);
-       cancel_delayed_work_sync(&i915->gt.idle_work);
+       drain_delayed_work(&i915->gt.retire_work);
+       drain_delayed_work(&i915->gt.idle_work);
        i915_gem_drain_workqueue(i915);
 
        mutex_lock(&i915->drm.struct_mutex);
@@ -68,13 +68,14 @@ static void mock_device_release(struct drm_device *dev)
        i915_gem_contexts_fini(i915);
        mutex_unlock(&i915->drm.struct_mutex);
 
+       i915_timelines_fini(i915);
+
        drain_workqueue(i915->wq);
        i915_gem_drain_freed_objects(i915);
 
        mutex_lock(&i915->drm.struct_mutex);
-       mock_fini_ggtt(i915);
+       mock_fini_ggtt(&i915->ggtt);
        mutex_unlock(&i915->drm.struct_mutex);
-       WARN_ON(!list_empty(&i915->gt.timelines));
 
        destroy_workqueue(i915->wq);
 
@@ -147,22 +148,24 @@ struct drm_i915_private *mock_gem_device(void)
        pdev->class = PCI_BASE_CLASS_DISPLAY << 16;
        pdev->dev.release = release_dev;
        dev_set_name(&pdev->dev, "mock");
-       dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 
 #if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
        /* hack to disable iommu for the fake device; force identity mapping */
        pdev->dev.archdata.iommu = (void *)-1;
 #endif
 
+       i915 = (struct drm_i915_private *)(pdev + 1);
+       pci_set_drvdata(pdev, i915);
+
+       intel_runtime_pm_init_early(i915);
+
        dev_pm_domain_set(&pdev->dev, &pm_domain);
        pm_runtime_enable(&pdev->dev);
        pm_runtime_dont_use_autosuspend(&pdev->dev);
        if (pm_runtime_enabled(&pdev->dev))
                WARN_ON(pm_runtime_get_sync(&pdev->dev));
 
-       i915 = (struct drm_i915_private *)(pdev + 1);
-       pci_set_drvdata(pdev, i915);
-
        err = drm_dev_init(&i915->drm, &mock_driver, &pdev->dev);
        if (err) {
                pr_err("Failed to initialise mock GEM device: err=%d\n", err);
@@ -186,6 +189,7 @@ struct drm_i915_private *mock_gem_device(void)
 
        init_waitqueue_head(&i915->gpu_error.wait_queue);
        init_waitqueue_head(&i915->gpu_error.reset_queue);
+       mutex_init(&i915->gpu_error.wedge_mutex);
 
        i915->wq = alloc_ordered_workqueue("mock", 0);
        if (!i915->wq)
@@ -223,13 +227,14 @@ struct drm_i915_private *mock_gem_device(void)
        if (!i915->priorities)
                goto err_dependencies;
 
-       INIT_LIST_HEAD(&i915->gt.timelines);
+       i915_timelines_init(i915);
+
        INIT_LIST_HEAD(&i915->gt.active_rings);
        INIT_LIST_HEAD(&i915->gt.closed_vma);
 
        mutex_lock(&i915->drm.struct_mutex);
 
-       mock_init_ggtt(i915);
+       mock_init_ggtt(i915, &i915->ggtt);
 
        mkwrite_device_info(i915)->ring_mask = BIT(0);
        i915->kernel_context = mock_context(i915, NULL);
@@ -250,6 +255,7 @@ err_context:
        i915_gem_contexts_fini(i915);
 err_unlock:
        mutex_unlock(&i915->drm.struct_mutex);
+       i915_timelines_fini(i915);
        kmem_cache_destroy(i915->priorities);
 err_dependencies:
        kmem_cache_destroy(i915->dependencies);
index 6ae418c76015b2187dd31437f3a460868765c5d5..cd83929fde8e120902b12615463594ee4080c9ae 100644 (file)
@@ -70,7 +70,7 @@ mock_ppgtt(struct drm_i915_private *i915,
        ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
        ppgtt->vm.file = ERR_PTR(-ENODEV);
 
-       i915_address_space_init(&ppgtt->vm, i915);
+       i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
 
        ppgtt->vm.clear_range = nop_clear_range;
        ppgtt->vm.insert_page = mock_insert_page;
@@ -97,11 +97,12 @@ static void mock_unbind_ggtt(struct i915_vma *vma)
 {
 }
 
-void mock_init_ggtt(struct drm_i915_private *i915)
+void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt)
 {
-       struct i915_ggtt *ggtt = &i915->ggtt;
+       memset(ggtt, 0, sizeof(*ggtt));
 
        ggtt->vm.i915 = i915;
+       ggtt->vm.is_ggtt = true;
 
        ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE);
        ggtt->mappable_end = resource_size(&ggtt->gmadr);
@@ -117,14 +118,10 @@ void mock_init_ggtt(struct drm_i915_private *i915)
        ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
        ggtt->vm.vma_ops.clear_pages = clear_pages;
 
-       i915_address_space_init(&ggtt->vm, i915);
-
-       ggtt->vm.is_ggtt = true;
+       i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
 }
 
-void mock_fini_ggtt(struct drm_i915_private *i915)
+void mock_fini_ggtt(struct i915_ggtt *ggtt)
 {
-       struct i915_ggtt *ggtt = &i915->ggtt;
-
        i915_address_space_fini(&ggtt->vm);
 }
index 9a0a833bb545556e9655677154af4367f22c72db..40d544bde1d5cd9afbdc2e61b5142fbca3e0834b 100644 (file)
@@ -25,8 +25,8 @@
 #ifndef __MOCK_GTT_H
 #define __MOCK_GTT_H
 
-void mock_init_ggtt(struct drm_i915_private *i915);
-void mock_fini_ggtt(struct drm_i915_private *i915);
+void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt);
+void mock_fini_ggtt(struct i915_ggtt *ggtt);
 
 struct i915_hw_ppgtt *
 mock_ppgtt(struct drm_i915_private *i915,
index dcf3b16f5a07c3780c6f19efdad99a5eb55b3aa2..d2de9ece211820e320c067832f346ad3c2cab180 100644 (file)
 
 void mock_timeline_init(struct i915_timeline *timeline, u64 context)
 {
+       timeline->i915 = NULL;
        timeline->fence_context = context;
 
        spin_lock_init(&timeline->lock);
 
-       init_request_active(&timeline->last_request, NULL);
+       INIT_ACTIVE_REQUEST(&timeline->barrier);
+       INIT_ACTIVE_REQUEST(&timeline->last_request);
        INIT_LIST_HEAD(&timeline->requests);
 
        i915_syncmap_init(&timeline->sync);
@@ -24,5 +26,5 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context)
 
 void mock_timeline_fini(struct i915_timeline *timeline)
 {
-       i915_timeline_fini(timeline);
+       i915_syncmap_free(&timeline->sync);
 }
index 361e962a7969044aba1c8464096edfa493b4d0c7..6403728fe7784f54977b0c318d790ea886553a04 100644 (file)
@@ -23,7 +23,6 @@
  * Author: Jani Nikula <jani.nikula@intel.com>
  */
 
-#include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
@@ -257,9 +256,9 @@ static void band_gap_reset(struct drm_i915_private *dev_priv)
        mutex_unlock(&dev_priv->sb_lock);
 }
 
-static bool intel_dsi_compute_config(struct intel_encoder *encoder,
-                                    struct intel_crtc_state *pipe_config,
-                                    struct drm_connector_state *conn_state)
+static int intel_dsi_compute_config(struct intel_encoder *encoder,
+                                   struct intel_crtc_state *pipe_config,
+                                   struct drm_connector_state *conn_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
@@ -276,7 +275,7 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
        if (fixed_mode) {
                intel_fixed_panel_mode(fixed_mode, adjusted_mode);
 
-               if (HAS_GMCH_DISPLAY(dev_priv))
+               if (HAS_GMCH(dev_priv))
                        intel_gmch_panel_fitting(crtc, pipe_config,
                                                 conn_state->scaling_mode);
                else
@@ -285,11 +284,16 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
        }
 
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
-               return false;
+               return -EINVAL;
 
        /* DSI uses short packets for sync events, so clear mode flags for DSI */
        adjusted_mode->flags = 0;
 
+       if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB888)
+               pipe_config->pipe_bpp = 24;
+       else
+               pipe_config->pipe_bpp = 18;
+
        if (IS_GEN9_LP(dev_priv)) {
                /* Enable Frame time stamp based scanline reporting */
                adjusted_mode->private_flags |=
@@ -303,16 +307,16 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
 
                ret = bxt_dsi_pll_compute(encoder, pipe_config);
                if (ret)
-                       return false;
+                       return -EINVAL;
        } else {
                ret = vlv_dsi_pll_compute(encoder, pipe_config);
                if (ret)
-                       return false;
+                       return -EINVAL;
        }
 
        pipe_config->clock_set = true;
 
-       return true;
+       return 0;
 }
 
 static bool glk_dsi_enable_io(struct intel_encoder *encoder)
@@ -674,6 +678,10 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
                                        LANE_CONFIGURATION_DUAL_LINK_B :
                                        LANE_CONFIGURATION_DUAL_LINK_A;
                }
+
+               if (intel_dsi->pixel_format != MIPI_DSI_FMT_RGB888)
+                       temp |= DITHERING_ENABLE;
+
                /* assert ip_tg_enable signal */
                I915_WRITE(port_ctrl, temp | DPI_ENABLE);
                POSTING_READ(port_ctrl);
@@ -960,13 +968,15 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+       intel_wakeref_t wakeref;
        enum port port;
        bool active = false;
 
        DRM_DEBUG_KMS("\n");
 
-       if (!intel_display_power_get_if_enabled(dev_priv,
-                                               encoder->power_domain))
+       wakeref = intel_display_power_get_if_enabled(dev_priv,
+                                                    encoder->power_domain);
+       if (!wakeref)
                return false;
 
        /*
@@ -1022,7 +1032,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
        }
 
 out_put_power:
-       intel_display_power_put(dev_priv, encoder->power_domain);
+       intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
 
        return active;
 }
@@ -1058,10 +1068,8 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
        }
 
        fmt = I915_READ(MIPI_DSI_FUNC_PRG(port)) & VID_MODE_FORMAT_MASK;
-       pipe_config->pipe_bpp =
-                       mipi_dsi_pixel_format_to_bpp(
-                               pixel_format_from_register_bits(fmt));
-       bpp = pipe_config->pipe_bpp;
+       bpp = mipi_dsi_pixel_format_to_bpp(
+                       pixel_format_from_register_bits(fmt));
 
        /* Enable Frame time stamp based scanline reporting */
        adjusted_mode->private_flags |=
@@ -1199,11 +1207,9 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
 
        if (IS_GEN9_LP(dev_priv)) {
                bxt_dsi_get_pipe_config(encoder, pipe_config);
-               pclk = bxt_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
-                                       pipe_config);
+               pclk = bxt_dsi_get_pclk(encoder, pipe_config);
        } else {
-               pclk = vlv_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
-                                       pipe_config);
+               pclk = vlv_dsi_get_pclk(encoder, pipe_config);
        }
 
        if (pclk) {
@@ -1575,6 +1581,7 @@ vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector)
        enum drm_panel_orientation orientation;
        struct intel_plane *plane;
        struct intel_crtc *crtc;
+       intel_wakeref_t wakeref;
        enum pipe pipe;
        u32 val;
 
@@ -1585,7 +1592,8 @@ vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector)
        plane = to_intel_plane(crtc->base.primary);
 
        power_domain = POWER_DOMAIN_PIPE(pipe);
-       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+       if (!wakeref)
                return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
 
        val = I915_READ(DSPCNTR(plane->i9xx_plane));
@@ -1597,7 +1605,7 @@ vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector)
        else
                orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
 
-       intel_display_power_put(dev_priv, power_domain);
+       intel_display_power_put(dev_priv, power_domain, wakeref);
 
        return orientation;
 }
@@ -1625,7 +1633,7 @@ static void intel_dsi_add_properties(struct intel_connector *connector)
                u32 allowed_scalers;
 
                allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
-               if (!HAS_GMCH_DISPLAY(dev_priv))
+               if (!HAS_GMCH(dev_priv))
                        allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
 
                drm_connector_attach_scaling_mode_property(&connector->base,
@@ -1689,6 +1697,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
        intel_encoder->post_disable = intel_dsi_post_disable;
        intel_encoder->get_hw_state = intel_dsi_get_hw_state;
        intel_encoder->get_config = intel_dsi_get_config;
+       intel_encoder->update_pipe = intel_panel_update_backlight;
 
        intel_connector->get_hw_state = intel_connector_get_hw_state;
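
As in the selftests earlier in this series, intel_display_power_get_if_enabled() now returns an intel_wakeref_t (zero when the power domain is off) rather than a bool, and intel_display_power_put() takes the cookie back. The conditional-grab pattern used by the two hw-state readers above:

    intel_wakeref_t wakeref;

    wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
    if (!wakeref)
            return false;   /* domain off: nothing acquired, nothing to put */

    /* ... read registers guarded by the domain ... */

    intel_display_power_put(dev_priv, power_domain, wakeref);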
 
index a132a8037ecc6b2a317229918e8d2471cdbcb9dd..954d5a8c4fa761841b8cb8bcdd579809dd0c9014 100644 (file)
@@ -252,20 +252,12 @@ void bxt_dsi_pll_disable(struct intel_encoder *encoder)
                DRM_ERROR("Timeout waiting for PLL lock deassertion\n");
 }
 
-static void assert_bpp_mismatch(enum mipi_dsi_pixel_format fmt, int pipe_bpp)
-{
-       int bpp = mipi_dsi_pixel_format_to_bpp(fmt);
-
-       WARN(bpp != pipe_bpp,
-            "bpp match assertion failure (expected %d, current %d)\n",
-            bpp, pipe_bpp);
-}
-
-u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
                     struct intel_crtc_state *config)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+       int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
        u32 dsi_clock, pclk;
        u32 pll_ctl, pll_div;
        u32 m = 0, p = 0, n;
@@ -319,15 +311,12 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
 
        dsi_clock = (m * refclk) / (p * n);
 
-       /* pixel_format and pipe_bpp should agree */
-       assert_bpp_mismatch(intel_dsi->pixel_format, pipe_bpp);
-
-       pclk = DIV_ROUND_CLOSEST(dsi_clock * intel_dsi->lane_count, pipe_bpp);
+       pclk = DIV_ROUND_CLOSEST(dsi_clock * intel_dsi->lane_count, bpp);
 
        return pclk;
 }
 
-u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
                     struct intel_crtc_state *config)
 {
        u32 pclk;
@@ -335,12 +324,7 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
        u32 dsi_ratio;
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
-       /* Divide by zero */
-       if (!pipe_bpp) {
-               DRM_ERROR("Invalid BPP(0)\n");
-               return 0;
-       }
+       int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
 
        config->dsi_pll.ctrl = I915_READ(BXT_DSI_PLL_CTL);
 
@@ -348,10 +332,7 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
 
        dsi_clk = (dsi_ratio * BXT_REF_CLOCK_KHZ) / 2;
 
-       /* pixel_format and pipe_bpp should agree */
-       assert_bpp_mismatch(intel_dsi->pixel_format, pipe_bpp);
-
-       pclk = DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, pipe_bpp);
+       pclk = DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, bpp);
 
        DRM_DEBUG_DRIVER("Calculated pclk=%u\n", pclk);
        return pclk;
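
The pclk recovery above is straight link-bandwidth arithmetic: each of the lane_count lanes runs at the per-lane DSI bit clock and every pixel costs bpp bits, so pclk = dsi_clk * lanes / bpp. A worked example with hypothetical numbers (not taken from the patch):

    /* dsi_clk = 445500 kHz per lane, 4 lanes, RGB888 => bpp = 24 */
    u32 pclk = DIV_ROUND_CLOSEST(445500u * 4, 24);  /* = 74250 kHz */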
index 77a26fd3a44ac18da443172883efaee8d66b7af7..06393cd1067db69ee448d8c08d30ae1691139498 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/regmap.h>
 #include <drm/drm_of.h>
 #include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_encoder_slave.h>
 
index 820c7e3878f0235d7ab9b4dd966a215ec7afa5b0..44da0f5d0ed9042ab40f7adac59240caaa5d4e11 100644 (file)
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_fb_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_plane_helper.h>
 #include <drm/drm_of.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <video/imx-ipu-v3.h>
 
 #include "imx-drm.h"
index 2c5bbe3173537346e1ea0d5c0b0f0e011ea485e7..e2a129d4ae7bd6d80423e272e959d11edbdd5280 100644 (file)
@@ -12,9 +12,9 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_fb_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
 #include <linux/mfd/syscon.h>
 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
 #include <linux/of_device.h>
index 293dd5752583bdad9148685f88c621d8f7840aa2..e725af8a0025a110ad1b1d607eea083054e3f151 100644 (file)
@@ -17,7 +17,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_fb_helper.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <video/imx-ipu-v3.h>
 
 #include "imx-drm.h"
index 058b53c0aa7ecd5c308fa4b222146e57f3281db4..3c62167a92510a1d205b180ada730361c4964b25 100644 (file)
@@ -4,19 +4,19 @@
  *
  * Copyright (C) 2011 Sascha Hauer, Pengutronix
  */
+#include <linux/clk.h>
 #include <linux/component.h>
-#include <linux/module.h>
-#include <linux/export.h>
 #include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
-#include <linux/clk.h>
-#include <linux/errno.h>
-#include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include <video/imx-ipu-v3.h>
 #include "imx-drm.h"
index f3ce51121dd62f967dbf011353097488bef47f7a..1a76de1e8e7bb500c08e7e8335a009580a136783 100644 (file)
@@ -10,9 +10,9 @@
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_fb_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
 #include <linux/videodev2.h>
 #include <video/of_display_timing.h>
 
index 62a9d47df9487a64234657621da8a49abebe4ff9..22e68a100e7beeaf752efde0235dddc7e13b9683 100644 (file)
@@ -13,7 +13,7 @@
  */
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_of.h>
 #include <linux/kernel.h>
 #include <linux/component.h>
index 92ecb9bf982cfe7398eefe3993966fc2acf28b0c..acad088173dab2ccaedba044f116570af8d35e3d 100644 (file)
@@ -14,8 +14,8 @@
 #include <asm/barrier.h>
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <linux/clk.h>
 #include <linux/pm_runtime.h>
 #include <soc/mediatek/smi.h>
index 6422e99952fe2624da4c475aee8a160b9f048340..cf59ea9bccfdf659ca042df67cf437a5da4a1078 100644 (file)
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_gem.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
 #include <linux/component.h>
 #include <linux/iommu.h>
 #include <linux/of_address.h>
index be5f6f1daf5542973f28b6c3db07f9decafd5bbe..e20fcaef28515b346b4e9b27bce0d59274271d4f 100644 (file)
@@ -12,7 +12,7 @@
  */
 
 #include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_modeset_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem.h>
 #include <drm/drm_gem_framebuffer_helper.h>
index 27b507eb4a997d5dee2809d454341cb7a79e2731..b00eb2d2e086c69a0bdb043bc6c524327c6a9a1f 100644 (file)
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_mipi_dsi.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
 #include <linux/clk.h>
 #include <linux/component.h>
 #include <linux/iopoll.h>
index 862f3ec221318800f9850755e2ec2d89deec7df4..915cc84621aeaf62516681a87d56e0e9760197d4 100644 (file)
@@ -14,7 +14,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drm_edid.h>
 #include <linux/arm-smccc.h>
 #include <linux/clk.h>
@@ -981,7 +981,8 @@ static int mtk_hdmi_setup_avi_infoframe(struct mtk_hdmi *hdmi,
        u8 buffer[17];
        ssize_t err;
 
-       err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
+       err = drm_hdmi_avi_infoframe_from_display_mode(&frame,
+                                                      &hdmi->conn, mode);
        if (err < 0) {
                dev_err(hdmi->dev,
                        "Failed to get AVI infoframe from mode: %zd\n", err);
@@ -1370,8 +1371,8 @@ static void mtk_hdmi_bridge_post_disable(struct drm_bridge *bridge)
 }
 
 static void mtk_hdmi_bridge_mode_set(struct drm_bridge *bridge,
-                                    struct drm_display_mode *mode,
-                                    struct drm_display_mode *adjusted_mode)
+                               const struct drm_display_mode *mode,
+                               const struct drm_display_mode *adjusted_mode)
 {
        struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
 
index 75d97f1b2e8fbf08e69b289f8a383ec4fd13291e..ec573c04206b7d925e8a78fdec4fb8b5162843c2 100644 (file)
@@ -30,7 +30,7 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_flip_work.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "meson_crtc.h"
 #include "meson_plane.h"
index 3ee4d4a4ecbae1e59c1fb5e0dfb873b24bf4497c..27f38bdc8677804376521a9b5e672bdfcacfb46c 100644 (file)
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_flip_work.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_plane_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drm_rect.h>
-#include <drm/drm_fb_helper.h>
 
 #include "meson_drv.h"
 #include "meson_plane.h"
@@ -90,7 +90,7 @@ static irqreturn_t meson_irq(int irq, void *arg)
 DEFINE_DRM_GEM_CMA_FOPS(fops);
 
 static struct drm_driver meson_driver = {
-       .driver_features        = DRIVER_HAVE_IRQ | DRIVER_GEM |
+       .driver_features        = DRIVER_GEM |
                                  DRIVER_MODESET | DRIVER_PRIME |
                                  DRIVER_ATOMIC,
 
@@ -152,6 +152,23 @@ static void meson_vpu_init(struct meson_drm *priv)
        writel_relaxed(0x20000, priv->io_base + _REG(VPU_WRARB_MODE_L2C1));
 }
 
+static void meson_remove_framebuffers(void)
+{
+       struct apertures_struct *ap;
+
+       ap = alloc_apertures(1);
+       if (!ap)
+               return;
+
+       /* The framebuffer can be located anywhere in RAM */
+       ap->ranges[0].base = 0;
+       ap->ranges[0].size = ~0;
+
+       drm_fb_helper_remove_conflicting_framebuffers(ap, "meson-drm-fb",
+                                                     false);
+       kfree(ap);
+}
+
 static int meson_drv_bind_master(struct device *dev, bool has_components)
 {
        struct platform_device *pdev = to_platform_device(dev);
@@ -262,6 +279,9 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
        if (ret)
                goto free_drm;
 
+       /* Remove early framebuffers (i.e., simplefb) */
+       meson_remove_framebuffers();
+
        drm_mode_config_init(drm);
        drm->mode_config.max_width = 3840;
        drm->mode_config.max_height = 2160;
index 807111ebfdd97785ac2b6855a1af14dc5067a8f9..e28814f4ea6cd2e05724ee46a0892b261d3d4cef 100644 (file)
@@ -26,9 +26,9 @@
 #include <linux/regulator/consumer.h>
 
 #include <drm/drmP.h>
-#include <drm/drm_edid.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/bridge/dw_hdmi.h>
 
 #include <uapi/linux/media-bus-format.h>
@@ -365,7 +365,8 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
        unsigned int wr_clk =
                readl_relaxed(priv->io_base + _REG(VPU_HDMI_SETTING));
 
-       DRM_DEBUG_DRIVER("%d:\"%s\"\n", mode->base.id, mode->name);
+       DRM_DEBUG_DRIVER("\"%s\" div%d\n", mode->name,
+                        mode->clock > 340000 ? 40 : 10);
 
        /* Enable clocks */
        regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, 0xffff, 0x100);
@@ -385,9 +386,17 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
        /* Enable normal output to PHY */
        dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12));
 
-       /* TMDS pattern setup (TOFIX pattern for 4k2k scrambling) */
-       dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01, 0x001f001f);
-       dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_23, 0x001f001f);
+       /* TMDS pattern setup (TOFIX Handle the YUV420 case) */
+       if (mode->clock > 340000) {
+               dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01, 0);
+               dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_23,
+                                 0x03ff03ff);
+       } else {
+               dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01,
+                                 0x001f001f);
+               dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_23,
+                                 0x001f001f);
+       }
 
        /* Load TMDS pattern */
        dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_CNTL, 0x1);
@@ -413,6 +422,8 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
        /* Disable clock, fifo, fifo_wr */
        regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1, 0xf, 0);
 
+       dw_hdmi_set_high_tmds_clock_ratio(hdmi);
+
        msleep(100);
 
        /* Reset PHY 3 times in a row */
@@ -555,12 +566,11 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
        int vic = drm_match_cea_mode(mode);
        enum drm_mode_status status;
 
-       DRM_DEBUG_DRIVER("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
-               mode->base.id, mode->name, mode->vrefresh, mode->clock,
-               mode->hdisplay, mode->hsync_start,
-               mode->hsync_end, mode->htotal,
-               mode->vdisplay, mode->vsync_start,
-               mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+       DRM_DEBUG_DRIVER("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
+
+       /* Reject modes whose clock exceeds the sink's max TMDS clock */
+       if (mode->clock > connector->display_info.max_tmds_clock)
+               return MODE_BAD;
 
        /* Check against non-VIC supported modes */
        if (!vic) {
@@ -650,8 +660,7 @@ static void meson_venc_hdmi_encoder_mode_set(struct drm_encoder *encoder,
        struct meson_drm *priv = dw_hdmi->priv;
        int vic = drm_match_cea_mode(mode);
 
-       DRM_DEBUG_DRIVER("%d:\"%s\" vic %d\n",
-                        mode->base.id, mode->name, vic);
+       DRM_DEBUG_DRIVER("\"%s\" vic %d\n", mode->name, vic);
 
        /* VENC + VENC-DVI Mode setup */
        meson_venc_hdmi_mode_set(priv, vic, mode);
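
The TMDS changes in this file follow the HDMI 2.0 rule: above a 340 MHz character rate the link must scramble and switch from a 1/10 to a 1/40 TMDS clock ratio, which is why the PHY init now branches on mode->clock > 340000 and calls dw_hdmi_set_high_tmds_clock_ratio(). The rule as a sketch:

    /* clock in kHz, as in struct drm_display_mode */
    static int tmds_clock_div(int clock_khz)
    {
            return clock_khz > 340000 ? 40 : 10;  /* HDMI 2.0: >340 MHz uses 1/40 */
    }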
index 0ba04f6813e63f0428b37c1aa977bd50b15eeccb..66d73a932d193668e543d6a15c7aa0fcdbc282a1 100644 (file)
@@ -848,6 +848,8 @@ struct meson_hdmi_venc_vic_mode {
        { 93, &meson_hdmi_encp_mode_2160p24 },
        { 94, &meson_hdmi_encp_mode_2160p25 },
        { 95, &meson_hdmi_encp_mode_2160p30 },
+       { 96, &meson_hdmi_encp_mode_2160p25 },
+       { 97, &meson_hdmi_encp_mode_2160p30 },
        { 0, NULL}, /* sentinel */
 };
 
index f7945bae3b4a9e74b7400b463c95984b93cabc59..d622d817b6df18cdf8ab05e490cd7d4b0cf0aa40 100644 (file)
@@ -26,9 +26,9 @@
 #include <linux/of_graph.h>
 
 #include <drm/drmP.h>
-#include <drm/drm_edid.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_probe_helper.h>
 
 #include "meson_venc_cvbs.h"
 #include "meson_venc.h"
index 1aad27813c23f6b5e823573ed3440aed141f194f..6e1d1054ad0637bdbcd94f0c7e2d08ed63d4790f 100644 (file)
@@ -57,7 +57,7 @@ static const struct file_operations mga_driver_fops = {
 static struct drm_driver driver = {
        .driver_features =
            DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_LEGACY |
-           DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
+           DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ,
        .dev_priv_size = sizeof(drm_mga_buf_priv_t),
        .load = mga_driver_load,
        .unload = mga_driver_unload,
index 30726c9fe28c810486a127c3dfbd92c07836b2e3..6893934b26c0387a390540b770ec1e49252902bb 100644 (file)
@@ -12,6 +12,7 @@
  */
 #include <linux/module.h>
 #include <drm/drmP.h>
+#include <drm/drm_util.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_crtc_helper.h>
 
index acf7bfe684549728644759e394119717f4d18f54..7481a3d556adc8845c8995f348bb9907376c9a0c 100644 (file)
@@ -16,6 +16,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "mgag200_drv.h"
 
index 9be7c355debd0be51ed34f107038aa2739edb6dd..d130825e2c752313a468b1b870d9fd0c9bca93cd 100644 (file)
 #include <linux/sort.h>
 #include <linux/debugfs.h>
 #include <linux/ktime.h>
-#include <drm/drm_mode.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_flip_work.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drm_rect.h>
 
 #include "dpu_kms.h"
index 36158b7d99cdb14a063b0c63be57a93314008c61..36af231bb73f02598a2ec950984f57cafaa61c95 100644 (file)
@@ -24,7 +24,7 @@
 #include "msm_drv.h"
 #include "dpu_kms.h"
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include "dpu_hwio.h"
 #include "dpu_hw_catalog.h"
 #include "dpu_hw_intf.h"
index 8f2359dc87b4ea5b34aabe77bb288a365f5f3233..0cfd4c06b6106fe096632cf9c2dd42e96c320e4d 100644 (file)
@@ -16,9 +16,9 @@
  */
 
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_flip_work.h>
 #include <drm/drm_mode.h>
+#include <drm/drm_probe_helper.h>
 
 #include "mdp4_kms.h"
 
@@ -244,14 +244,8 @@ static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc)
 
        mode = &crtc->state->adjusted_mode;
 
-       DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
-                       mdp4_crtc->name, mode->base.id, mode->name,
-                       mode->vrefresh, mode->clock,
-                       mode->hdisplay, mode->hsync_start,
-                       mode->hsync_end, mode->htotal,
-                       mode->vdisplay, mode->vsync_start,
-                       mode->vsync_end, mode->vtotal,
-                       mode->type, mode->flags);
+       DBG("%s: set mode: " DRM_MODE_FMT,
+                       mdp4_crtc->name, DRM_MODE_ARG(mode));
 
        mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
                        MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
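
This and the following msm hunks all make the same substitution: the dozen hand-listed drm_display_mode fields in DBG() are replaced by the DRM_MODE_FMT format string and its DRM_MODE_ARG() argument pack from <drm/drm_modes.h>, so every driver logs modelines identically:

    /* one macro pair instead of listing ~13 fields by hand */
    DRM_DEBUG_KMS("set mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));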
index 6a1ebdace391d947e54b856e42c020e1f105d98f..caa39b4621e34a9b0aa6c905f50304181b8ad060 100644 (file)
@@ -18,7 +18,7 @@
  */
 
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "mdp4_kms.h"
 
@@ -58,14 +58,7 @@ static void mdp4_dsi_encoder_mode_set(struct drm_encoder *encoder,
 
        mode = adjusted_mode;
 
-       DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
-                       mode->base.id, mode->name,
-                       mode->vrefresh, mode->clock,
-                       mode->hdisplay, mode->hsync_start,
-                       mode->hsync_end, mode->htotal,
-                       mode->vdisplay, mode->vsync_start,
-                       mode->vsync_end, mode->vtotal,
-                       mode->type, mode->flags);
+       DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
 
        ctrl_pol = 0;
        if (mode->flags & DRM_MODE_FLAG_NHSYNC)
index a8fd14d4846b37d95160b6b00ea84c32a270c2bc..259d51971401c8c56b58d430826b82e4646da0bf 100644 (file)
@@ -16,7 +16,7 @@
  */
 
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "mdp4_kms.h"
 
@@ -104,14 +104,7 @@ static void mdp4_dtv_encoder_mode_set(struct drm_encoder *encoder,
 
        mode = adjusted_mode;
 
-       DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
-                       mode->base.id, mode->name,
-                       mode->vrefresh, mode->clock,
-                       mode->hdisplay, mode->hsync_start,
-                       mode->hsync_end, mode->htotal,
-                       mode->vdisplay, mode->vsync_start,
-                       mode->vsync_end, mode->vtotal,
-                       mode->type, mode->flags);
+       DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
 
        mdp4_dtv_encoder->pixclock = mode->clock * 1000;
 
index c9e34501a89e8c485743b8a27632783bde4355bb..df6f9803a1d71ae2f0d0ca8e531240812982806b 100644 (file)
@@ -17,7 +17,7 @@
  */
 
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "mdp4_kms.h"
 
@@ -273,14 +273,7 @@ static void mdp4_lcdc_encoder_mode_set(struct drm_encoder *encoder,
 
        mode = adjusted_mode;
 
-       DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
-                       mode->base.id, mode->name,
-                       mode->vrefresh, mode->clock,
-                       mode->hdisplay, mode->hsync_start,
-                       mode->hsync_end, mode->htotal,
-                       mode->vdisplay, mode->vsync_start,
-                       mode->vsync_end, mode->vtotal,
-                       mode->type, mode->flags);
+       DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
 
        mdp4_lcdc_encoder->pixclock = mode->clock * 1000;
 
index c1962f29ec7d688e98ec57f40c9375210fc47af0..9bf9d6065c55c614d6dc3eae7c833d64954ed5a0 100644 (file)
@@ -12,7 +12,7 @@
  */
 
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "mdp5_kms.h"
 
@@ -134,14 +134,7 @@ void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
 {
        mode = adjusted_mode;
 
-       DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
-                       mode->base.id, mode->name,
-                       mode->vrefresh, mode->clock,
-                       mode->hdisplay, mode->hsync_start,
-                       mode->hsync_end, mode->htotal,
-                       mode->vdisplay, mode->vsync_start,
-                       mode->vsync_end, mode->vtotal,
-                       mode->type, mode->flags);
+       DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
        pingpong_tearcheck_setup(encoder, mode);
        mdp5_crtc_set_pipeline(encoder->crtc);
 }
index c5fde1a4191aaa03d7a002e52b803a2689519667..b0cf63c4e3d75afff6ca12d60976cb6ae31bb38f 100644 (file)
@@ -19,8 +19,8 @@
 #include <linux/sort.h>
 #include <drm/drm_mode.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_flip_work.h>
+#include <drm/drm_probe_helper.h>
 
 #include "mdp5_kms.h"
 
@@ -384,14 +384,7 @@ static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
 
        mode = &crtc->state->adjusted_mode;
 
-       DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
-                       crtc->name, mode->base.id, mode->name,
-                       mode->vrefresh, mode->clock,
-                       mode->hdisplay, mode->hsync_start,
-                       mode->hsync_end, mode->htotal,
-                       mode->vdisplay, mode->vsync_start,
-                       mode->vsync_end, mode->vtotal,
-                       mode->type, mode->flags);
+       DBG("%s: set mode: " DRM_MODE_FMT, crtc->name, DRM_MODE_ARG(mode));
 
        mixer_width = mode->hdisplay;
        if (r_mixer)
index fcd44d1d10682b6bf063ecf60848c635283689eb..820a62c4006327742a2b6397041e64ae5059d967 100644 (file)
@@ -17,7 +17,7 @@
  */
 
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "mdp5_kms.h"
 
@@ -118,14 +118,7 @@ static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder,
 
        mode = adjusted_mode;
 
-       DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
-                       mode->base.id, mode->name,
-                       mode->vrefresh, mode->clock,
-                       mode->hdisplay, mode->hsync_start,
-                       mode->hsync_end, mode->htotal,
-                       mode->vdisplay, mode->vsync_start,
-                       mode->vsync_end, mode->vtotal,
-                       mode->type, mode->flags);
+       DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
 
        ctrl_pol = 0;
 
index 7cebcb2b3a379246e55faef1dcce6657d2ec3a3a..6153514db04c2a209c7d08b756ec1397e532c8b5 100644 (file)
@@ -16,6 +16,7 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <drm/drm_util.h>
 
 #include "mdp5_kms.h"
 #include "mdp5_smp.h"
index 08f3fc6771b7829de66c1af953cbb3abb16ccee2..9c6b31c2d79f01948836c77cbdf1b8628708441e 100644 (file)
@@ -168,7 +168,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
                        bool is_dual_dsi);
 int msm_dsi_host_power_off(struct mipi_dsi_host *host);
 int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
-                                       struct drm_display_mode *mode);
+                                 const struct drm_display_mode *mode);
 struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
                                        unsigned long *panel_flags);
 struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host);
index 38e481d2d606f9f8d3a0600c8e5f5495db00a848..610183db1daf640498568fe272e6100a75d22b93 100644 (file)
@@ -2424,7 +2424,7 @@ unlock_ret:
 }
 
 int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
-                                       struct drm_display_mode *mode)
+                                 const struct drm_display_mode *mode)
 {
        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
 
index 80aa6344185e13068ad4442e069fbe2a91cb1e52..979a8e9293412349f1d29d894fb4e2f64d1d5cc2 100644 (file)
@@ -527,8 +527,8 @@ disable_phy:
 }
 
 static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
-               struct drm_display_mode *mode,
-               struct drm_display_mode *adjusted_mode)
+               const struct drm_display_mode *mode,
+               const struct drm_display_mode *adjusted_mode)
 {
        int id = dsi_mgr_bridge_get_id(bridge);
        struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
@@ -536,14 +536,7 @@ static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
        struct mipi_dsi_host *host = msm_dsi->host;
        bool is_dual_dsi = IS_DUAL_DSI();
 
-       DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
-                       mode->base.id, mode->name,
-                       mode->vrefresh, mode->clock,
-                       mode->hdisplay, mode->hsync_start,
-                       mode->hsync_end, mode->htotal,
-                       mode->vdisplay, mode->vsync_start,
-                       mode->vsync_end, mode->vtotal,
-                       mode->type, mode->flags);
+       DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
 
        if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
                return;
index 931a5c97cccf6fc65066b4bf77a67a6e274cecaa..11166bf232ffcdecc56dc5a9925455a784fa00ee 100644 (file)
@@ -52,22 +52,15 @@ static void edp_bridge_post_disable(struct drm_bridge *bridge)
 }
 
 static void edp_bridge_mode_set(struct drm_bridge *bridge,
-               struct drm_display_mode *mode,
-               struct drm_display_mode *adjusted_mode)
+               const struct drm_display_mode *mode,
+               const struct drm_display_mode *adjusted_mode)
 {
        struct drm_device *dev = bridge->dev;
        struct drm_connector *connector;
        struct edp_bridge *edp_bridge = to_edp_bridge(bridge);
        struct msm_edp *edp = edp_bridge->edp;
 
-       DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
-                       mode->base.id, mode->name,
-                       mode->vrefresh, mode->clock,
-                       mode->hdisplay, mode->hsync_start,
-                       mode->hsync_end, mode->htotal,
-                       mode->vdisplay, mode->vsync_start,
-                       mode->vsync_end, mode->vtotal,
-                       mode->type, mode->flags);
+       DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
 
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                if ((connector->encoder != NULL) &&
index 98d61c690260f49a09228c75ee325738ed484aa5..03197b8959ba640e73f63b0201797edebb9e941e 100644 (file)
@@ -101,7 +101,8 @@ static void msm_hdmi_config_avi_infoframe(struct hdmi *hdmi)
        u32 val;
        int len;
 
-       drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode, false);
+       drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+                                                hdmi->connector, mode);
 
        len = hdmi_infoframe_pack(&frame, buffer, sizeof(buffer));
        if (len < 0) {
@@ -207,8 +208,8 @@ static void msm_hdmi_bridge_post_disable(struct drm_bridge *bridge)
 }
 
 static void msm_hdmi_bridge_mode_set(struct drm_bridge *bridge,
-                struct drm_display_mode *mode,
-                struct drm_display_mode *adjusted_mode)
+                const struct drm_display_mode *mode,
+                const struct drm_display_mode *adjusted_mode)
 {
        struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
        struct hdmi *hdmi = hdmi_bridge->hdmi;
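The bridge ->mode_set() callbacks here pick up the new drm_bridge_funcs signature, which passes both modes as const pointers so a bridge can read but no longer modify them. A minimal conforming callback might look like this (my_bridge_mode_set() and the debug print are illustrative, not from the patch):

    #include <drm/drm_bridge.h>
    #include <drm/drm_print.h>

    static void my_bridge_mode_set(struct drm_bridge *bridge,
                                   const struct drm_display_mode *mode,
                                   const struct drm_display_mode *adjusted_mode)
    {
            /* Timings may be read here, but the modes are immutable now. */
            DRM_DEBUG("bridge pixel clock: %d kHz\n", adjusted_mode->clock);
    }
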
index d2cdc7b553feb4dd8b3864dfa69c3bad3502c273..8747fb32a1062997e2136839b5a54f8fa8977676 100644 (file)
@@ -1063,8 +1063,7 @@ static const struct file_operations fops = {
 };
 
 static struct drm_driver msm_driver = {
-       .driver_features    = DRIVER_HAVE_IRQ |
-                               DRIVER_GEM |
+       .driver_features    = DRIVER_GEM |
                                DRIVER_PRIME |
                                DRIVER_RENDER |
                                DRIVER_ATOMIC |
index 9cd6a96c6bf2a522d413681f20d918753921f554..4e0c6c2f9a869a713123ecdef2149e40dc365914 100644 (file)
@@ -39,8 +39,8 @@
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/msm_drm.h>
 #include <drm/drm_gem.h>
index 67dfd8d3dc12caaaf9c17c7e5c3d6dbc344bb696..136058978e0fd095013ded524e18c839849a52e3 100644 (file)
@@ -16,8 +16,8 @@
  */
 
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "msm_drv.h"
 #include "msm_kms.h"
index 24b1f0c1432e99e8eae3bd536d80c11efd6e60a7..0ee1ca8a316a4cd5c4b30f4ae026b8a9095796b7 100644 (file)
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_of.h>
 #include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drm_simple_kms_helper.h>
 #include <linux/clk.h>
 #include <linux/iopoll.h>
index 88ba003979e6b841cce82bd390282335ebb13028..967379f3f571995f89393742dae4d0f559c7f834 100644 (file)
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drm_simple_kms_helper.h>
 
 #include "mxsfb_drv.h"
@@ -263,23 +263,12 @@ static int mxsfb_load(struct drm_device *drm, unsigned long flags)
 
        drm_kms_helper_poll_init(drm);
 
-       mxsfb->fbdev = drm_fbdev_cma_init(drm, 32,
-                                         drm->mode_config.num_connector);
-       if (IS_ERR(mxsfb->fbdev)) {
-               ret = PTR_ERR(mxsfb->fbdev);
-               mxsfb->fbdev = NULL;
-               dev_err(drm->dev, "Failed to init FB CMA area\n");
-               goto err_cma;
-       }
-
        platform_set_drvdata(pdev, drm);
 
        drm_helper_hpd_irq_event(drm);
 
        return 0;
 
-err_cma:
-       drm_irq_uninstall(drm);
 err_irq:
        drm_panel_detach(mxsfb->panel);
 err_vblank:
@@ -290,11 +279,6 @@ err_vblank:
 
 static void mxsfb_unload(struct drm_device *drm)
 {
-       struct mxsfb_drm_private *mxsfb = drm->dev_private;
-
-       if (mxsfb->fbdev)
-               drm_fbdev_cma_fini(mxsfb->fbdev);
-
        drm_kms_helper_poll_fini(drm);
        drm_mode_config_cleanup(drm);
 
@@ -307,13 +291,6 @@ static void mxsfb_unload(struct drm_device *drm)
        pm_runtime_disable(drm->dev);
 }
 
-static void mxsfb_lastclose(struct drm_device *drm)
-{
-       struct mxsfb_drm_private *mxsfb = drm->dev_private;
-
-       drm_fbdev_cma_restore_mode(mxsfb->fbdev);
-}
-
 static void mxsfb_irq_preinstall(struct drm_device *drm)
 {
        struct mxsfb_drm_private *mxsfb = drm->dev_private;
@@ -345,9 +322,7 @@ DEFINE_DRM_GEM_CMA_FOPS(fops);
 
 static struct drm_driver mxsfb_driver = {
        .driver_features        = DRIVER_GEM | DRIVER_MODESET |
-                                 DRIVER_PRIME | DRIVER_ATOMIC |
-                                 DRIVER_HAVE_IRQ,
-       .lastclose              = mxsfb_lastclose,
+                                 DRIVER_PRIME | DRIVER_ATOMIC,
        .irq_handler            = mxsfb_irq_handler,
        .irq_preinstall         = mxsfb_irq_preinstall,
        .irq_uninstall          = mxsfb_irq_preinstall,
@@ -412,6 +387,8 @@ static int mxsfb_probe(struct platform_device *pdev)
        if (ret)
                goto err_unload;
 
+       drm_fbdev_generic_setup(drm, 32);
+
        return 0;
 
 err_unload:
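mxsfb replaces its hand-rolled CMA fbdev (allocation, error unwind, and a .lastclose restore hook) with the generic fbdev emulation, which needs only one call after the device is registered and then manages its own lifetime. A sketch of the resulting probe tail, assuming the driver's existing err_unload label:

    ret = drm_dev_register(drm, 0);
    if (ret)
            goto err_unload;

    /* Generic fbdev emulation; 32 is the preferred bits per pixel. */
    drm_fbdev_generic_setup(drm, 32);

    return 0;
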
index 5d0883fc805bc1e7822e9caa1903e05d786f0ba4..bedd6801edca5ef715aa947a15b31803fcd90522 100644 (file)
@@ -37,7 +37,6 @@ struct mxsfb_drm_private {
        struct drm_simple_display_pipe  pipe;
        struct drm_connector            connector;
        struct drm_panel                *panel;
-       struct drm_fbdev_cma            *fbdev;
 };
 
 int mxsfb_setup_crtc(struct drm_device *dev);
index e5edf016a4399f44a84f2e657953b59212d00520..27add9976931bc29fc2bc36f5d0cb0183c3e2601 100644 (file)
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drm_simple_kms_helper.h>
 #include <drm/drmP.h>
 
index 6a4ca139cf5d71efb67427dd2f79eb886482d817..26fd71c06626f5046d2c24fe8dc6754336c28ecb 100644 (file)
@@ -26,6 +26,7 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include "nouveau_drv.h"
 #include "nouveau_reg.h"
 #include "nouveau_encoder.h"
@@ -750,7 +751,9 @@ static int nv17_tv_set_property(struct drm_encoder *encoder,
                /* Disable the crtc to ensure a full modeset is
                 * performed whenever it's turned on again. */
                if (crtc)
-                       drm_crtc_force_disable(crtc);
+                       drm_crtc_helper_set_mode(crtc, &crtc->mode,
+                                                crtc->x, crtc->y,
+                                                crtc->primary->fb);
        }
 
        return 0;
index 26af4578593916d7700836d7810de18652c0abe6..2e8a5fd9b26278ad3f2ac0b1523bedb505c80d30 100644 (file)
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_dp_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drm_scdc_helper.h>
 #include <drm/drm_edid.h>
 
@@ -561,7 +561,7 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
        u32 max_ac_packet;
        union hdmi_infoframe avi_frame;
        union hdmi_infoframe vendor_frame;
-       bool scdc_supported, high_tmds_clock_ratio = false, scrambling = false;
+       bool high_tmds_clock_ratio = false, scrambling = false;
        u8 config;
        int ret;
        int size;
@@ -571,10 +571,9 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
                return;
 
        hdmi = &nv_connector->base.display_info.hdmi;
-       scdc_supported = hdmi->scdc.supported;
 
-       ret = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame.avi, mode,
-                                                      scdc_supported);
+       ret = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame.avi,
+                                                      &nv_connector->base, mode);
        if (!ret) {
                /* We have an AVI InfoFrame, populate it to the display */
                args.pwr.avi_infoframe_length
@@ -680,6 +679,8 @@ nv50_msto_payload(struct nv50_msto *msto)
        struct nv50_mstm *mstm = mstc->mstm;
        int vcpi = mstc->port->vcpi.vcpi, i;
 
+       WARN_ON(!mutex_is_locked(&mstm->mgr.payload_lock));
+
        NV_ATOMIC(drm, "%s: vcpi %d\n", msto->encoder.name, vcpi);
        for (i = 0; i < mstm->mgr.max_payloads; i++) {
                struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
@@ -704,14 +705,16 @@ nv50_msto_cleanup(struct nv50_msto *msto)
        struct nv50_mstc *mstc = msto->mstc;
        struct nv50_mstm *mstm = mstc->mstm;
 
+       if (!msto->disabled)
+               return;
+
        NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name);
-       if (mstc->port && mstc->port->vcpi.vcpi > 0 && !nv50_msto_payload(msto))
-               drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port);
-       if (msto->disabled) {
-               msto->mstc = NULL;
-               msto->head = NULL;
-               msto->disabled = false;
-       }
+
+       drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port);
+
+       msto->mstc = NULL;
+       msto->head = NULL;
+       msto->disabled = false;
 }
 
 static void
@@ -731,8 +734,10 @@ nv50_msto_prepare(struct nv50_msto *msto)
                               (0x0100 << msto->head->base.index),
        };
 
+       mutex_lock(&mstm->mgr.payload_lock);
+
        NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
-       if (mstc->port && mstc->port->vcpi.vcpi > 0) {
+       if (mstc->port->vcpi.vcpi > 0) {
                struct drm_dp_payload *payload = nv50_msto_payload(msto);
                if (payload) {
                        args.vcpi.start_slot = payload->start_slot;
@@ -746,7 +751,9 @@ nv50_msto_prepare(struct nv50_msto *msto)
                  msto->encoder.name, msto->head->base.base.name,
                  args.vcpi.start_slot, args.vcpi.num_slots,
                  args.vcpi.pbn, args.vcpi.aligned_pbn);
+
        nvif_mthd(&drm->display->disp.object, 0, &args, sizeof(args));
+       mutex_unlock(&mstm->mgr.payload_lock);
 }
 
 static int
@@ -754,16 +761,23 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
                       struct drm_crtc_state *crtc_state,
                       struct drm_connector_state *conn_state)
 {
-       struct nv50_mstc *mstc = nv50_mstc(conn_state->connector);
+       struct drm_atomic_state *state = crtc_state->state;
+       struct drm_connector *connector = conn_state->connector;
+       struct nv50_mstc *mstc = nv50_mstc(connector);
        struct nv50_mstm *mstm = mstc->mstm;
-       int bpp = conn_state->connector->display_info.bpc * 3;
+       int bpp = connector->display_info.bpc * 3;
        int slots;
 
-       mstc->pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, bpp);
+       mstc->pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock,
+                                        bpp);
 
-       slots = drm_dp_find_vcpi_slots(&mstm->mgr, mstc->pbn);
-       if (slots < 0)
-               return slots;
+       if (drm_atomic_crtc_needs_modeset(crtc_state) &&
+           !drm_connector_is_unregistered(connector)) {
+               slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr,
+                                                     mstc->port, mstc->pbn);
+               if (slots < 0)
+                       return slots;
+       }
 
        return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
                                           mstc->native);
@@ -829,8 +843,7 @@ nv50_msto_disable(struct drm_encoder *encoder)
        struct nv50_mstc *mstc = msto->mstc;
        struct nv50_mstm *mstm = mstc->mstm;
 
-       if (mstc->port)
-               drm_dp_mst_reset_vcpi_slots(&mstm->mgr, mstc->port);
+       drm_dp_mst_reset_vcpi_slots(&mstm->mgr, mstc->port);
 
        mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0);
        mstm->modified = true;
@@ -927,12 +940,43 @@ nv50_mstc_get_modes(struct drm_connector *connector)
        return ret;
 }
 
+static int
+nv50_mstc_atomic_check(struct drm_connector *connector,
+                      struct drm_connector_state *new_conn_state)
+{
+       struct drm_atomic_state *state = new_conn_state->state;
+       struct nv50_mstc *mstc = nv50_mstc(connector);
+       struct drm_dp_mst_topology_mgr *mgr = &mstc->mstm->mgr;
+       struct drm_connector_state *old_conn_state =
+               drm_atomic_get_old_connector_state(state, connector);
+       struct drm_crtc_state *crtc_state;
+       struct drm_crtc *new_crtc = new_conn_state->crtc;
+
+       if (!old_conn_state->crtc)
+               return 0;
+
+       /* We only want to free VCPI if this state disables the CRTC on this
+        * connector
+        */
+       if (new_crtc) {
+               crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
+
+               if (!crtc_state ||
+                   !drm_atomic_crtc_needs_modeset(crtc_state) ||
+                   crtc_state->enable)
+                       return 0;
+       }
+
+       return drm_dp_atomic_release_vcpi_slots(state, mgr, mstc->port);
+}
+
 static const struct drm_connector_helper_funcs
 nv50_mstc_help = {
        .get_modes = nv50_mstc_get_modes,
        .mode_valid = nv50_mstc_mode_valid,
        .best_encoder = nv50_mstc_best_encoder,
        .atomic_best_encoder = nv50_mstc_atomic_best_encoder,
+       .atomic_check = nv50_mstc_atomic_check,
 };
 
 static enum drm_connector_status
@@ -942,7 +986,7 @@ nv50_mstc_detect(struct drm_connector *connector, bool force)
        enum drm_connector_status conn_status;
        int ret;
 
-       if (!mstc->port)
+       if (drm_connector_is_unregistered(connector))
                return connector_status_disconnected;
 
        ret = pm_runtime_get_sync(connector->dev->dev);
@@ -961,7 +1005,10 @@ static void
 nv50_mstc_destroy(struct drm_connector *connector)
 {
        struct nv50_mstc *mstc = nv50_mstc(connector);
+
        drm_connector_cleanup(&mstc->connector);
+       drm_dp_mst_put_port_malloc(mstc->port);
+
        kfree(mstc);
 }
 
@@ -1009,6 +1056,7 @@ nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
        drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
        drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0);
        drm_connector_set_path_property(&mstc->connector, path);
+       drm_dp_mst_get_port_malloc(port);
        return 0;
 }
 
@@ -1073,10 +1121,6 @@ nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
 
        drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector);
 
-       drm_modeset_lock(&drm->dev->mode_config.connection_mutex, NULL);
-       mstc->port = NULL;
-       drm_modeset_unlock(&drm->dev->mode_config.connection_mutex);
-
        drm_connector_put(&mstc->connector);
 }
 
@@ -1099,11 +1143,8 @@ nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr,
        int ret;
 
        ret = nv50_mstc_new(mstm, port, path, &mstc);
-       if (ret) {
-               if (mstc)
-                       mstc->connector.funcs->destroy(&mstc->connector);
+       if (ret)
                return NULL;
-       }
 
        return &mstc->connector;
 }
@@ -2117,6 +2158,10 @@ nv50_disp_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
                        return ret;
        }
 
+       ret = drm_dp_mst_atomic_check(state);
+       if (ret)
+               return ret;
+
        return 0;
 }
 
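nouveau switches from drm_dp_find_vcpi_slots() to the atomic VCPI helpers: slots are reserved in the encoder's atomic_check, released from the connector's atomic_check when the CRTC is being shut off, and the whole MST topology is validated once per atomic state. A condensed sketch of the reserve/validate flow (state, mstm, mstc and the return variables are assumed from the surrounding checks; the helper names are the real ones used above):

    /* Encoder ->atomic_check(): reserve time slots in this state. */
    slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr,
                                          mstc->port, mstc->pbn);
    if (slots < 0)
            return slots;

    /* Driver ->atomic_check(): verify no MST manager is oversubscribed. */
    ret = drm_dp_mst_atomic_check(state);
    if (ret)
            return ret;
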
index 3f463c91314ab6ae6b896a759b1a56b9494807d3..4116ee62adafea9762cc99121adad315c02b726f 100644 (file)
@@ -33,6 +33,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drm_atomic.h>
 
 #include "nouveau_reg.h"
index f326ffd867664cd98dfc9dea789f91917c21856a..56b6ac1b8edd22835619b77d773a1e2b149a144f 100644 (file)
@@ -30,6 +30,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include <nvif/class.h>
 
@@ -453,7 +454,7 @@ nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime)
                if (drm_drv_uses_atomic_modeset(dev))
                        drm_atomic_helper_shutdown(dev);
                else
-                       drm_crtc_force_disable_all(dev);
+                       drm_helper_force_disable_all(dev);
        }
 
        /* disable flip completion events */
index 032317c81bf057c049689fbc25522c3f0d91203a..d275418edd24689ac5d5f99dc7cb510882c3d58b 100644 (file)
@@ -374,9 +374,9 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
 
        strcpy(info->fix.id, "nouveaufb");
        if (!chan)
-               info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED;
+               info->flags = FBINFO_HWACCEL_DISABLED;
        else
-               info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
+               info->flags = FBINFO_HWACCEL_COPYAREA |
                              FBINFO_HWACCEL_FILLRECT |
                              FBINFO_HWACCEL_IMAGEBLIT;
        info->fbops = &nouveau_fbcon_sw_ops;
index b81302c4bf9e6d49a678ff8e34829b8c0bc7dbc3..9da94d10782a8829a5cc2376bfe12929d9fda9fa 100644 (file)
@@ -17,7 +17,7 @@
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "omap_drv.h"
 
@@ -305,14 +305,9 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
        drm_mode_destroy(dev, new_mode);
 
 done:
-       DBG("connector: mode %s: "
-                       "%d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+       DBG("connector: mode %s: " DRM_MODE_FMT,
                        (ret == MODE_OK) ? "valid" : "invalid",
-                       mode->base.id, mode->name, mode->vrefresh, mode->clock,
-                       mode->hdisplay, mode->hsync_start,
-                       mode->hsync_end, mode->htotal,
-                       mode->vdisplay, mode->vsync_start,
-                       mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+                       DRM_MODE_ARG(mode));
 
        return ret;
 }
index caffc547ef97e385cb913f77fd3ebe55a082d486..d99e24dcc0bff125dff5cc857d13d24a735c2518 100644 (file)
@@ -18,7 +18,6 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_mode.h>
 #include <drm/drm_plane_helper.h>
 #include <linux/math64.h>
@@ -427,12 +426,8 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc)
        struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
        struct drm_display_mode *mode = &crtc->state->adjusted_mode;
 
-       DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
-           omap_crtc->name, mode->base.id, mode->name,
-           mode->vrefresh, mode->clock,
-           mode->hdisplay, mode->hsync_start, mode->hsync_end, mode->htotal,
-           mode->vdisplay, mode->vsync_start, mode->vsync_end, mode->vtotal,
-           mode->type, mode->flags);
+       DBG("%s: set mode: " DRM_MODE_FMT,
+           omap_crtc->name, DRM_MODE_ARG(mode));
 
        drm_display_mode_to_videomode(mode, &omap_crtc->vm);
 }
index 5e67d58cbc281c1d8e2bd5b8ddd66f82073bdb7b..f8292278f57dd24d05783b1659fb047657a43b11 100644 (file)
@@ -21,8 +21,8 @@
 
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "omap_dmm_tiler.h"
 #include "omap_drv.h"
index bd7f2c227a25e98c88fa83c5c6f15eff177f6f69..0c57d2814c517667a2350912b9e4eba4dfb51f51 100644 (file)
@@ -23,7 +23,6 @@
 #include <linux/workqueue.h>
 
 #include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_gem.h>
 #include <drm/omap_drm.h>
 
index 933ebc9f9faaaf35049a53aef49551e3ff1e740a..0d85b3a357678d791cb3d4bc5a85c98f3c0e10c3 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/list.h>
 
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
 #include <drm/drm_edid.h>
 
 #include "omap_drv.h"
@@ -76,8 +76,8 @@ static void omap_encoder_hdmi_mode_set(struct drm_encoder *encoder,
                struct hdmi_avi_infoframe avi;
                int r;
 
-               r = drm_hdmi_avi_infoframe_from_display_mode(&avi, adjusted_mode,
-                                                            false);
+               r = drm_hdmi_avi_infoframe_from_display_mode(&avi, connector,
+                                                            adjusted_mode);
                if (r == 0)
                        dssdev->ops->hdmi.set_infoframe(dssdev, &avi);
        }
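drm_hdmi_avi_infoframe_from_display_mode() now takes the connector instead of a bare is_hdmi2 flag, letting the helper derive HDMI 2.0 / SCDC behaviour from the connector's display info, as the msm and nouveau hunks above also show. A minimal sketch of the updated call (connector and mode assumed from the caller):

    struct hdmi_avi_infoframe frame;
    int err;

    err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
    if (err < 0)
            return err; /* mode cannot be represented in an AVI infoframe */
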
index 4d264fd554d8f7d80ba63011b9250b562db90253..4f8eb9d08f998768403dc4672c502732eec2cbcb 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/seq_file.h>
 
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_modeset_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 
 #include "omap_dmm_tiler.h"
index aee99194499f1675be7e160b96241f1f62dbbcd1..851c59f07eb1fc3c73d5f03599b78b0e67c2b49e 100644 (file)
@@ -16,6 +16,7 @@
  */
 
 #include <drm/drm_crtc.h>
+#include <drm/drm_util.h>
 #include <drm/drm_fb_helper.h>
 
 #include "omap_drv.h"
index 3f3537719beb2eddaa6c132236da3a8f2ff0bdc7..3e070153ef21cc17c843a36d3f707eeeec48979e 100644 (file)
@@ -77,6 +77,17 @@ config DRM_PANEL_JDI_LT070ME05000
          The panel has a 1200(RGB)×1920 (WUXGA) resolution and uses
          24 bit per pixel.
 
+config DRM_PANEL_KINGDISPLAY_KD097D04
+       tristate "Kingdisplay kd097d04 panel"
+       depends on OF
+       depends on DRM_MIPI_DSI
+       depends on BACKLIGHT_CLASS_DEVICE
+       help
+         Say Y here if you want to enable support for Kingdisplay kd097d04
+         TFT-LCD modules. The panel has a 1536x2048 resolution and uses
+         24-bit RGB per pixel. It provides a MIPI DSI interface to
+         the host and has a built-in LED backlight.
+
 config DRM_PANEL_SAMSUNG_LD9040
        tristate "Samsung LD9040 RGB/SPI panel"
        depends on OF && SPI
@@ -196,6 +207,16 @@ config DRM_PANEL_SHARP_LS043T1LE01
          Say Y here if you want to enable support for Sharp LS043T1LE01 qHD
          (540x960) DSI panel as found on the Qualcomm APQ8074 Dragonboard
 
+config DRM_PANEL_SITRONIX_ST7701
+       tristate "Sitronix ST7701 panel driver"
+       depends on OF
+       depends on DRM_MIPI_DSI
+       depends on BACKLIGHT_CLASS_DEVICE
+       help
+         Say Y here if you want to enable support for the Sitronix
+         ST7701 controller for 480x864 LCD panels with MIPI/RGB/SPI
+         system interfaces.
+
 config DRM_PANEL_SITRONIX_ST7789V
        tristate "Sitronix ST7789V panel"
        depends on OF && SPI
@@ -204,6 +225,15 @@ config DRM_PANEL_SITRONIX_ST7789V
          Say Y here if you want to enable support for the Sitronix
          ST7789V controller for 240x320 LCD panels
 
+config DRM_PANEL_TPO_TPG110
+       tristate "TPO TPG 800x400 panel"
+       depends on OF && SPI && GPIOLIB
+       depends on BACKLIGHT_CLASS_DEVICE
+       help
+         Say Y here if you want to enable support for TPO TPG110
+         400CH LTPS TFT LCD Single Chip Digital Driver for up to
+         800x400 LCD panels.
+
 config DRM_PANEL_TRULY_NT35597_WQXGA
        tristate "Truly WQXGA"
        depends on OF
index 4396658a7996466c62ae24b8391757044eb0e712..e7ab71968bbf6e8366dd01c6f016388368db3038 100644 (file)
@@ -6,6 +6,7 @@ obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o
 obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
 obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
 obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
+obj-$(CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04) += panel-kingdisplay-kd097d04.o
 obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
 obj-$(CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO) += panel-olimex-lcd-olinuxino.o
 obj-$(CONFIG_DRM_PANEL_ORISETECH_OTM8009A) += panel-orisetech-otm8009a.o
@@ -20,5 +21,7 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o
 obj-$(CONFIG_DRM_PANEL_SEIKO_43WVF1G) += panel-seiko-43wvf1g.o
 obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
 obj-$(CONFIG_DRM_PANEL_SHARP_LS043T1LE01) += panel-sharp-ls043t1le01.o
+obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7701) += panel-sitronix-st7701.o
 obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o
+obj-$(CONFIG_DRM_PANEL_TPO_TPG110) += panel-tpo-tpg110.o
 obj-$(CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA) += panel-truly-nt35597.o
index ca4ae45dd307c6cb29b16efb99caa1c1965736a5..8e5724b63f1f80f2ef50a6b4d2ca8c4a92d7e4dd 100644 (file)
@@ -70,18 +70,12 @@ static inline struct innolux_panel *to_innolux_panel(struct drm_panel *panel)
 static int innolux_panel_disable(struct drm_panel *panel)
 {
        struct innolux_panel *innolux = to_innolux_panel(panel);
-       int err;
 
        if (!innolux->enabled)
                return 0;
 
        backlight_disable(innolux->backlight);
 
-       err = mipi_dsi_dcs_set_display_off(innolux->link);
-       if (err < 0)
-               DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
-                             err);
-
        innolux->enabled = false;
 
        return 0;
@@ -95,6 +89,11 @@ static int innolux_panel_unprepare(struct drm_panel *panel)
        if (!innolux->prepared)
                return 0;
 
+       err = mipi_dsi_dcs_set_display_off(innolux->link);
+       if (err < 0)
+               DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
+                             err);
+
        err = mipi_dsi_dcs_enter_sleep_mode(innolux->link);
        if (err < 0) {
                DRM_DEV_ERROR(panel->dev, "failed to enter sleep mode: %d\n",
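Moving the display-off DCS command from .disable to .unprepare follows the drm_panel sequencing: the core always calls .disable before .unprepare on teardown, so display-off now sits next to sleep-in and the power-down steps the controller expects. Sketch of the teardown order (panel assumed):

    drm_panel_disable(panel);    /* backlight off */
    drm_panel_unprepare(panel);  /* display off, enter sleep, power down */
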
diff --git a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
new file mode 100644 (file)
index 0000000..2a25a91
--- /dev/null
@@ -0,0 +1,473 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd
+ */
+
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+struct kingdisplay_panel {
+       struct drm_panel base;
+       struct mipi_dsi_device *link;
+
+       struct backlight_device *backlight;
+       struct regulator *supply;
+       struct gpio_desc *enable_gpio;
+
+       bool prepared;
+       bool enabled;
+};
+
+struct kingdisplay_panel_cmd {
+       char cmd;
+       char data;
+};
+
+/*
+ * According to the discussion on
+ * https://review.coreboot.org/#/c/coreboot/+/22472/
+ * the panel init array is not part of the panel's datasheet but instead
+ * just came in this form from the panel vendor.
+ */
+static const struct kingdisplay_panel_cmd init_code[] = {
+       /* voltage setting */
+       { 0xB0, 0x00 },
+       { 0xB2, 0x02 },
+       { 0xB3, 0x11 },
+       { 0xB4, 0x00 },
+       { 0xB6, 0x80 },
+       /* VCOM disable */
+       { 0xB7, 0x02 },
+       { 0xB8, 0x80 },
+       { 0xBA, 0x43 },
+       /* VCOM setting */
+       { 0xBB, 0x53 },
+       /* VSP setting */
+       { 0xBC, 0x0A },
+       /* VSN setting */
+       { 0xBD, 0x4A },
+       /* VGH setting */
+       { 0xBE, 0x2F },
+       /* VGL setting */
+       { 0xBF, 0x1A },
+       { 0xF0, 0x39 },
+       { 0xF1, 0x22 },
+       /* Gamma setting */
+       { 0xB0, 0x02 },
+       { 0xC0, 0x00 },
+       { 0xC1, 0x01 },
+       { 0xC2, 0x0B },
+       { 0xC3, 0x15 },
+       { 0xC4, 0x22 },
+       { 0xC5, 0x11 },
+       { 0xC6, 0x15 },
+       { 0xC7, 0x19 },
+       { 0xC8, 0x1A },
+       { 0xC9, 0x16 },
+       { 0xCA, 0x18 },
+       { 0xCB, 0x13 },
+       { 0xCC, 0x18 },
+       { 0xCD, 0x13 },
+       { 0xCE, 0x1C },
+       { 0xCF, 0x19 },
+       { 0xD0, 0x21 },
+       { 0xD1, 0x2C },
+       { 0xD2, 0x2F },
+       { 0xD3, 0x30 },
+       { 0xD4, 0x19 },
+       { 0xD5, 0x1F },
+       { 0xD6, 0x00 },
+       { 0xD7, 0x01 },
+       { 0xD8, 0x0B },
+       { 0xD9, 0x15 },
+       { 0xDA, 0x22 },
+       { 0xDB, 0x11 },
+       { 0xDC, 0x15 },
+       { 0xDD, 0x19 },
+       { 0xDE, 0x1A },
+       { 0xDF, 0x16 },
+       { 0xE0, 0x18 },
+       { 0xE1, 0x13 },
+       { 0xE2, 0x18 },
+       { 0xE3, 0x13 },
+       { 0xE4, 0x1C },
+       { 0xE5, 0x19 },
+       { 0xE6, 0x21 },
+       { 0xE7, 0x2C },
+       { 0xE8, 0x2F },
+       { 0xE9, 0x30 },
+       { 0xEA, 0x19 },
+       { 0xEB, 0x1F },
+       /* GOA MUX setting */
+       { 0xB0, 0x01 },
+       { 0xC0, 0x10 },
+       { 0xC1, 0x0F },
+       { 0xC2, 0x0E },
+       { 0xC3, 0x0D },
+       { 0xC4, 0x0C },
+       { 0xC5, 0x0B },
+       { 0xC6, 0x0A },
+       { 0xC7, 0x09 },
+       { 0xC8, 0x08 },
+       { 0xC9, 0x07 },
+       { 0xCA, 0x06 },
+       { 0xCB, 0x05 },
+       { 0xCC, 0x00 },
+       { 0xCD, 0x01 },
+       { 0xCE, 0x02 },
+       { 0xCF, 0x03 },
+       { 0xD0, 0x04 },
+       { 0xD6, 0x10 },
+       { 0xD7, 0x0F },
+       { 0xD8, 0x0E },
+       { 0xD9, 0x0D },
+       { 0xDA, 0x0C },
+       { 0xDB, 0x0B },
+       { 0xDC, 0x0A },
+       { 0xDD, 0x09 },
+       { 0xDE, 0x08 },
+       { 0xDF, 0x07 },
+       { 0xE0, 0x06 },
+       { 0xE1, 0x05 },
+       { 0xE2, 0x00 },
+       { 0xE3, 0x01 },
+       { 0xE4, 0x02 },
+       { 0xE5, 0x03 },
+       { 0xE6, 0x04 },
+       { 0xE7, 0x00 },
+       { 0xEC, 0xC0 },
+       /* GOA timing setting */
+       { 0xB0, 0x03 },
+       { 0xC0, 0x01 },
+       { 0xC2, 0x6F },
+       { 0xC3, 0x6F },
+       { 0xC5, 0x36 },
+       { 0xC8, 0x08 },
+       { 0xC9, 0x04 },
+       { 0xCA, 0x41 },
+       { 0xCC, 0x43 },
+       { 0xCF, 0x60 },
+       { 0xD2, 0x04 },
+       { 0xD3, 0x04 },
+       { 0xD4, 0x03 },
+       { 0xD5, 0x02 },
+       { 0xD6, 0x01 },
+       { 0xD7, 0x00 },
+       { 0xDB, 0x01 },
+       { 0xDE, 0x36 },
+       { 0xE6, 0x6F },
+       { 0xE7, 0x6F },
+       /* GOE setting */
+       { 0xB0, 0x06 },
+       { 0xB8, 0xA5 },
+       { 0xC0, 0xA5 },
+       { 0xD5, 0x3F },
+};
+
+static inline
+struct kingdisplay_panel *to_kingdisplay_panel(struct drm_panel *panel)
+{
+       return container_of(panel, struct kingdisplay_panel, base);
+}
+
+static int kingdisplay_panel_disable(struct drm_panel *panel)
+{
+       struct kingdisplay_panel *kingdisplay = to_kingdisplay_panel(panel);
+       int err;
+
+       if (!kingdisplay->enabled)
+               return 0;
+
+       backlight_disable(kingdisplay->backlight);
+
+       err = mipi_dsi_dcs_set_display_off(kingdisplay->link);
+       if (err < 0)
+               DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
+                             err);
+
+       kingdisplay->enabled = false;
+
+       return 0;
+}
+
+static int kingdisplay_panel_unprepare(struct drm_panel *panel)
+{
+       struct kingdisplay_panel *kingdisplay = to_kingdisplay_panel(panel);
+       int err;
+
+       if (!kingdisplay->prepared)
+               return 0;
+
+       err = mipi_dsi_dcs_enter_sleep_mode(kingdisplay->link);
+       if (err < 0) {
+               DRM_DEV_ERROR(panel->dev, "failed to enter sleep mode: %d\n",
+                             err);
+               return err;
+       }
+
+       /* T15: 120ms */
+       msleep(120);
+
+       gpiod_set_value_cansleep(kingdisplay->enable_gpio, 0);
+
+       err = regulator_disable(kingdisplay->supply);
+       if (err < 0)
+               return err;
+
+       kingdisplay->prepared = false;
+
+       return 0;
+}
+
+static int kingdisplay_panel_prepare(struct drm_panel *panel)
+{
+       struct kingdisplay_panel *kingdisplay = to_kingdisplay_panel(panel);
+       int err, regulator_err;
+       unsigned int i;
+
+       if (kingdisplay->prepared)
+               return 0;
+
+       gpiod_set_value_cansleep(kingdisplay->enable_gpio, 0);
+
+       err = regulator_enable(kingdisplay->supply);
+       if (err < 0)
+               return err;
+
+       /* T2: 15ms */
+       usleep_range(15000, 16000);
+
+       gpiod_set_value_cansleep(kingdisplay->enable_gpio, 1);
+
+       /* T4: 15ms */
+       usleep_range(15000, 16000);
+
+       for (i = 0; i < ARRAY_SIZE(init_code); i++) {
+               err = mipi_dsi_generic_write(kingdisplay->link, &init_code[i],
+                                       sizeof(struct kingdisplay_panel_cmd));
+               if (err < 0) {
+                       DRM_DEV_ERROR(panel->dev, "failed to write init cmds: %d\n",
+                                     err);
+                       goto poweroff;
+               }
+       }
+
+       err = mipi_dsi_dcs_exit_sleep_mode(kingdisplay->link);
+       if (err < 0) {
+               DRM_DEV_ERROR(panel->dev, "failed to exit sleep mode: %d\n",
+                             err);
+               goto poweroff;
+       }
+
+       /* T6: 120ms */
+       msleep(120);
+
+       err = mipi_dsi_dcs_set_display_on(kingdisplay->link);
+       if (err < 0) {
+               DRM_DEV_ERROR(panel->dev, "failed to set display on: %d\n",
+                             err);
+               goto poweroff;
+       }
+
+       /* T7: 10ms */
+       usleep_range(10000, 11000);
+
+       kingdisplay->prepared = true;
+
+       return 0;
+
+poweroff:
+       gpiod_set_value_cansleep(kingdisplay->enable_gpio, 0);
+
+       regulator_err = regulator_disable(kingdisplay->supply);
+       if (regulator_err)
+               DRM_DEV_ERROR(panel->dev, "failed to disable regulator: %d\n",
+                             regulator_err);
+
+       return err;
+}
+
+static int kingdisplay_panel_enable(struct drm_panel *panel)
+{
+       struct kingdisplay_panel *kingdisplay = to_kingdisplay_panel(panel);
+       int ret;
+
+       if (kingdisplay->enabled)
+               return 0;
+
+       ret = backlight_enable(kingdisplay->backlight);
+       if (ret) {
+               DRM_DEV_ERROR(panel->drm->dev,
+                             "Failed to enable backlight: %d\n", ret);
+               return ret;
+       }
+
+       kingdisplay->enabled = true;
+
+       return 0;
+}
+
+static const struct drm_display_mode default_mode = {
+       .clock = 229000,
+       .hdisplay = 1536,
+       .hsync_start = 1536 + 100,
+       .hsync_end = 1536 + 100 + 24,
+       .htotal = 1536 + 100 + 24 + 100,
+       .vdisplay = 2048,
+       .vsync_start = 2048 + 95,
+       .vsync_end = 2048 + 95 + 2,
+       .vtotal = 2048 + 95 + 2 + 23,
+       .vrefresh = 60,
+};
+
+static int kingdisplay_panel_get_modes(struct drm_panel *panel)
+{
+       struct drm_display_mode *mode;
+
+       mode = drm_mode_duplicate(panel->drm, &default_mode);
+       if (!mode) {
+               DRM_DEV_ERROR(panel->drm->dev, "failed to add mode %ux%u@%u\n",
+                             default_mode.hdisplay, default_mode.vdisplay,
+                             default_mode.vrefresh);
+               return -ENOMEM;
+       }
+
+       drm_mode_set_name(mode);
+
+       drm_mode_probed_add(panel->connector, mode);
+
+       panel->connector->display_info.width_mm = 147;
+       panel->connector->display_info.height_mm = 196;
+       panel->connector->display_info.bpc = 8;
+
+       return 1;
+}
+
+static const struct drm_panel_funcs kingdisplay_panel_funcs = {
+       .disable = kingdisplay_panel_disable,
+       .unprepare = kingdisplay_panel_unprepare,
+       .prepare = kingdisplay_panel_prepare,
+       .enable = kingdisplay_panel_enable,
+       .get_modes = kingdisplay_panel_get_modes,
+};
+
+static const struct of_device_id kingdisplay_of_match[] = {
+       { .compatible = "kingdisplay,kd097d04", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, kingdisplay_of_match);
+
+static int kingdisplay_panel_add(struct kingdisplay_panel *kingdisplay)
+{
+       struct device *dev = &kingdisplay->link->dev;
+       int err;
+
+       kingdisplay->supply = devm_regulator_get(dev, "power");
+       if (IS_ERR(kingdisplay->supply))
+               return PTR_ERR(kingdisplay->supply);
+
+       kingdisplay->enable_gpio = devm_gpiod_get_optional(dev, "enable",
+                                                          GPIOD_OUT_HIGH);
+       if (IS_ERR(kingdisplay->enable_gpio)) {
+               err = PTR_ERR(kingdisplay->enable_gpio);
+               dev_dbg(dev, "failed to get enable gpio: %d\n", err);
+               kingdisplay->enable_gpio = NULL;
+       }
+
+       kingdisplay->backlight = devm_of_find_backlight(dev);
+       if (IS_ERR(kingdisplay->backlight))
+               return PTR_ERR(kingdisplay->backlight);
+
+       drm_panel_init(&kingdisplay->base);
+       kingdisplay->base.funcs = &kingdisplay_panel_funcs;
+       kingdisplay->base.dev = &kingdisplay->link->dev;
+
+       return drm_panel_add(&kingdisplay->base);
+}
+
+static void kingdisplay_panel_del(struct kingdisplay_panel *kingdisplay)
+{
+       drm_panel_remove(&kingdisplay->base);
+}
+
+static int kingdisplay_panel_probe(struct mipi_dsi_device *dsi)
+{
+       struct kingdisplay_panel *kingdisplay;
+       int err;
+
+       dsi->lanes = 4;
+       dsi->format = MIPI_DSI_FMT_RGB888;
+       dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+                         MIPI_DSI_MODE_LPM;
+
+       kingdisplay = devm_kzalloc(&dsi->dev, sizeof(*kingdisplay), GFP_KERNEL);
+       if (!kingdisplay)
+               return -ENOMEM;
+
+       mipi_dsi_set_drvdata(dsi, kingdisplay);
+       kingdisplay->link = dsi;
+
+       err = kingdisplay_panel_add(kingdisplay);
+       if (err < 0)
+               return err;
+
+       return mipi_dsi_attach(dsi);
+}
+
+static int kingdisplay_panel_remove(struct mipi_dsi_device *dsi)
+{
+       struct kingdisplay_panel *kingdisplay = mipi_dsi_get_drvdata(dsi);
+       int err;
+
+       err = kingdisplay_panel_unprepare(&kingdisplay->base);
+       if (err < 0)
+               DRM_DEV_ERROR(&dsi->dev, "failed to unprepare panel: %d\n",
+                             err);
+
+       err = kingdisplay_panel_disable(&kingdisplay->base);
+       if (err < 0)
+               DRM_DEV_ERROR(&dsi->dev, "failed to disable panel: %d\n", err);
+
+       err = mipi_dsi_detach(dsi);
+       if (err < 0)
+               DRM_DEV_ERROR(&dsi->dev, "failed to detach from DSI host: %d\n",
+                             err);
+
+       kingdisplay_panel_del(kingdisplay);
+
+       return 0;
+}
+
+static void kingdisplay_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+       struct kingdisplay_panel *kingdisplay = mipi_dsi_get_drvdata(dsi);
+
+       kingdisplay_panel_unprepare(&kingdisplay->base);
+       kingdisplay_panel_disable(&kingdisplay->base);
+}
+
+static struct mipi_dsi_driver kingdisplay_panel_driver = {
+       .driver = {
+               .name = "panel-kingdisplay-kd097d04",
+               .of_match_table = kingdisplay_of_match,
+       },
+       .probe = kingdisplay_panel_probe,
+       .remove = kingdisplay_panel_remove,
+       .shutdown = kingdisplay_panel_shutdown,
+};
+module_mipi_dsi_driver(kingdisplay_panel_driver);
+
+MODULE_AUTHOR("Chris Zhong <zyw@rock-chips.com>");
+MODULE_AUTHOR("Nickey Yang <nickey.yang@rock-chips.com>");
+MODULE_DESCRIPTION("kingdisplay KD097D04 panel driver");
+MODULE_LICENSE("GPL v2");
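The driver registers through module_mipi_dsi_driver(), which expands to the standard module init/exit boilerplate. The equivalent open-coded form, shown only for reference (the macro form above is what the driver actually uses):

    static int __init kingdisplay_panel_init(void)
    {
            return mipi_dsi_driver_register(&kingdisplay_panel_driver);
    }
    module_init(kingdisplay_panel_init);

    static void __exit kingdisplay_panel_exit(void)
    {
            mipi_dsi_driver_unregister(&kingdisplay_panel_driver);
    }
    module_exit(kingdisplay_panel_exit);
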
index 9c69e739a524dd59d84f5758cc210686e4f797b4..9e8218f6a3f20aebe19a01f8140dbb78c17fc86f 100644 (file)
@@ -1597,6 +1597,30 @@ static const struct panel_desc kyo_tcg121xglp = {
        .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
 };
 
+static const struct drm_display_mode lemaker_bl035_rgb_002_mode = {
+       .clock = 7000,
+       .hdisplay = 320,
+       .hsync_start = 320 + 20,
+       .hsync_end = 320 + 20 + 30,
+       .htotal = 320 + 20 + 30 + 38,
+       .vdisplay = 240,
+       .vsync_start = 240 + 4,
+       .vsync_end = 240 + 4 + 3,
+       .vtotal = 240 + 4 + 3 + 15,
+       .vrefresh = 60,
+};
+
+static const struct panel_desc lemaker_bl035_rgb_002 = {
+       .modes = &lemaker_bl035_rgb_002_mode,
+       .num_modes = 1,
+       .size = {
+               .width = 70,
+               .height = 52,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+       .bus_flags = DRM_BUS_FLAG_DE_LOW,
+};
+
 static const struct drm_display_mode lg_lb070wv8_mode = {
        .clock = 33246,
        .hdisplay = 800,
@@ -2008,6 +2032,30 @@ static const struct panel_desc ortustech_com43h4m85ulc = {
        .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
 };
 
+static const struct drm_display_mode pda_91_00156_a0_mode = {
+       .clock = 33300,
+       .hdisplay = 800,
+       .hsync_start = 800 + 1,
+       .hsync_end = 800 + 1 + 64,
+       .htotal = 800 + 1 + 64 + 64,
+       .vdisplay = 480,
+       .vsync_start = 480 + 1,
+       .vsync_end = 480 + 1 + 23,
+       .vtotal = 480 + 1 + 23 + 22,
+       .vrefresh = 60,
+};
+
+static const struct panel_desc pda_91_00156_a0 = {
+       .modes = &pda_91_00156_a0_mode,
+       .num_modes = 1,
+       .size = {
+               .width = 152,
+               .height = 91,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+};
+
 static const struct drm_display_mode qd43003c0_40_mode = {
        .clock = 9000,
        .hdisplay = 480,
@@ -2637,6 +2685,9 @@ static const struct of_device_id platform_of_match[] = {
        }, {
                .compatible = "kyo,tcg121xglp",
                .data = &kyo_tcg121xglp,
+       }, {
+               .compatible = "lemaker,bl035-rgb-002",
+               .data = &lemaker_bl035_rgb_002,
        }, {
                .compatible = "lg,lb070wv8",
                .data = &lg_lb070wv8,
@@ -2685,6 +2736,9 @@ static const struct of_device_id platform_of_match[] = {
        }, {
                .compatible = "ortustech,com43h4m85ulc",
                .data = &ortustech_com43h4m85ulc,
+       }, {
+               .compatible = "pda,91-00156-a0",
+               .data = &pda_91_00156_a0,
        }, {
                .compatible = "qiaodian,qd43003c0-40",
                .data = &qd43003c0_40,
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
new file mode 100644 (file)
index 0000000..63f9a1c
--- /dev/null
@@ -0,0 +1,426 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019, Amarula Solutions.
+ * Author: Jagan Teki <jagan@amarulasolutions.com>
+ */
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+/* Command2 BKx selection command */
+#define DSI_CMD2BKX_SEL                        0xFF
+
+/* Command2, BK0 commands */
+#define DSI_CMD2_BK0_PVGAMCTRL         0xB0 /* Positive Voltage Gamma Control */
+#define DSI_CMD2_BK0_NVGAMCTRL         0xB1 /* Negative Voltage Gamma Control */
+#define DSI_CMD2_BK0_LNESET            0xC0 /* Display Line setting */
+#define DSI_CMD2_BK0_PORCTRL           0xC1 /* Porch control */
+#define DSI_CMD2_BK0_INVSEL            0xC2 /* Inversion selection, Frame Rate Control */
+
+/* Command2, BK1 commands */
+#define DSI_CMD2_BK1_VRHS              0xB0 /* Vop amplitude setting */
+#define DSI_CMD2_BK1_VCOM              0xB1 /* VCOM amplitude setting */
+#define DSI_CMD2_BK1_VGHSS             0xB2 /* VGH Voltage setting */
+#define DSI_CMD2_BK1_TESTCMD           0xB3 /* TEST Command Setting */
+#define DSI_CMD2_BK1_VGLS              0xB5 /* VGL Voltage setting */
+#define DSI_CMD2_BK1_PWCTLR1           0xB7 /* Power Control 1 */
+#define DSI_CMD2_BK1_PWCTLR2           0xB8 /* Power Control 2 */
+#define DSI_CMD2_BK1_SPD1              0xC1 /* Source pre_drive timing set1 */
+#define DSI_CMD2_BK1_SPD2              0xC2 /* Source EQ2 Setting */
+#define DSI_CMD2_BK1_MIPISET1          0xD0 /* MIPI Setting 1 */
+
+/*
+ * Command2 with BK function selection.
+ *
+ * BIT[4, 0]: [CN2, BKXSEL]
+ * 10 = CMD2BK0, Command2 BK0
+ * 11 = CMD2BK1, Command2 BK1
+ * 00 = Command2 disable
+ */
+#define DSI_CMD2BK1_SEL                        0x11
+#define DSI_CMD2BK0_SEL                        0x10
+#define DSI_CMD2BKX_SEL_NONE           0x00
+
+/* Command2, BK0 bytes */
+#define DSI_LINESET_LINE               0x69
+#define DSI_LINESET_LDE_EN             BIT(7)
+#define DSI_LINESET_LINEDELTA          GENMASK(1, 0)
+#define DSI_CMD2_BK0_LNESET_B1         DSI_LINESET_LINEDELTA
+#define DSI_CMD2_BK0_LNESET_B0         (DSI_LINESET_LDE_EN | DSI_LINESET_LINE)
+#define DSI_INVSEL_DEFAULT             GENMASK(5, 4)
+#define DSI_INVSEL_NLINV               GENMASK(2, 0)
+#define DSI_INVSEL_RTNI                        GENMASK(2, 1)
+#define DSI_CMD2_BK0_INVSEL_B1         DSI_INVSEL_RTNI
+#define DSI_CMD2_BK0_INVSEL_B0         (DSI_INVSEL_DEFAULT | DSI_INVSEL_NLINV)
+#define DSI_CMD2_BK0_PORCTRL_B0(m)     ((m)->vtotal - (m)->vsync_end)
+#define DSI_CMD2_BK0_PORCTRL_B1(m)     ((m)->vsync_start - (m)->vdisplay)
+
+/* Command2, BK1 bytes */
+#define DSI_CMD2_BK1_VRHA_SET          0x45
+#define DSI_CMD2_BK1_VCOM_SET          0x13
+#define DSI_CMD2_BK1_VGHSS_SET         GENMASK(2, 0)
+#define DSI_CMD2_BK1_TESTCMD_VAL       BIT(7)
+#define DSI_VGLS_DEFAULT               BIT(6)
+#define DSI_VGLS_SEL                   GENMASK(2, 0)
+#define DSI_CMD2_BK1_VGLS_SET          (DSI_VGLS_DEFAULT | DSI_VGLS_SEL)
+#define DSI_PWCTLR1_AP                 BIT(7) /* Gamma OP bias, max */
+#define DSI_PWCTLR1_APIS               BIT(2) /* Source OP input bias, min */
+#define DSI_PWCTLR1_APOS               BIT(0) /* Source OP output bias, min */
+#define DSI_CMD2_BK1_PWCTLR1_SET       (DSI_PWCTLR1_AP | DSI_PWCTLR1_APIS | \
+                                       DSI_PWCTLR1_APOS)
+#define DSI_PWCTLR2_AVDD               BIT(5) /* AVDD 6.6v */
+#define DSI_PWCTLR2_AVCL               0x0    /* AVCL -4.4v */
+#define DSI_CMD2_BK1_PWCTLR2_SET       (DSI_PWCTLR2_AVDD | DSI_PWCTLR2_AVCL)
+#define DSI_SPD1_T2D                   BIT(3)
+#define DSI_CMD2_BK1_SPD1_SET          (GENMASK(6, 4) | DSI_SPD1_T2D)
+#define DSI_CMD2_BK1_SPD2_SET          DSI_CMD2_BK1_SPD1_SET
+#define DSI_MIPISET1_EOT_EN            BIT(3)
+#define DSI_CMD2_BK1_MIPISET1_SET      (BIT(7) | DSI_MIPISET1_EOT_EN)
+
+struct st7701_panel_desc {
+       const struct drm_display_mode *mode;
+       unsigned int lanes;
+       unsigned long flags;
+       enum mipi_dsi_pixel_format format;
+       const char *const *supply_names;
+       unsigned int num_supplies;
+       unsigned int panel_sleep_delay;
+};
+
+struct st7701 {
+       struct drm_panel panel;
+       struct mipi_dsi_device *dsi;
+       const struct st7701_panel_desc *desc;
+
+       struct backlight_device *backlight;
+       struct regulator_bulk_data *supplies;
+       struct gpio_desc *reset;
+       unsigned int sleep_delay;
+};
+
+static inline struct st7701 *panel_to_st7701(struct drm_panel *panel)
+{
+       return container_of(panel, struct st7701, panel);
+}
+
+static inline int st7701_dsi_write(struct st7701 *st7701, const void *seq,
+                                  size_t len)
+{
+       return mipi_dsi_dcs_write_buffer(st7701->dsi, seq, len);
+}
+
+#define ST7701_DSI(st7701, seq...)                             \
+       {                                                       \
+               const u8 d[] = { seq };                         \
+               st7701_dsi_write(st7701, d, ARRAY_SIZE(d));     \
+       }
+
+static void st7701_init_sequence(struct st7701 *st7701)
+{
+       const struct drm_display_mode *mode = st7701->desc->mode;
+
+       ST7701_DSI(st7701, MIPI_DCS_SOFT_RESET, 0x00);
+
+       /* We need to wait 5ms before sending new commands */
+       msleep(5);
+
+       ST7701_DSI(st7701, MIPI_DCS_EXIT_SLEEP_MODE, 0x00);
+
+       msleep(st7701->sleep_delay);
+
+       /* Command2, BK0 */
+       ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
+                  0x77, 0x01, 0x00, 0x00, DSI_CMD2BK0_SEL);
+       ST7701_DSI(st7701, DSI_CMD2_BK0_PVGAMCTRL, 0x00, 0x0E, 0x15, 0x0F,
+                  0x11, 0x08, 0x08, 0x08, 0x08, 0x23, 0x04, 0x13, 0x12,
+                  0x2B, 0x34, 0x1F);
+       ST7701_DSI(st7701, DSI_CMD2_BK0_NVGAMCTRL, 0x00, 0x0E, 0x95, 0x0F,
+                  0x13, 0x07, 0x09, 0x08, 0x08, 0x22, 0x04, 0x10, 0x0E,
+                  0x2C, 0x34, 0x1F);
+       ST7701_DSI(st7701, DSI_CMD2_BK0_LNESET,
+                  DSI_CMD2_BK0_LNESET_B0, DSI_CMD2_BK0_LNESET_B1);
+       ST7701_DSI(st7701, DSI_CMD2_BK0_PORCTRL,
+                  DSI_CMD2_BK0_PORCTRL_B0(mode),
+                  DSI_CMD2_BK0_PORCTRL_B1(mode));
+       ST7701_DSI(st7701, DSI_CMD2_BK0_INVSEL,
+                  DSI_CMD2_BK0_INVSEL_B0, DSI_CMD2_BK0_INVSEL_B1);
+
+       /* Command2, BK1 */
+       ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
+                       0x77, 0x01, 0x00, 0x00, DSI_CMD2BK1_SEL);
+       ST7701_DSI(st7701, DSI_CMD2_BK1_VRHS, DSI_CMD2_BK1_VRHA_SET);
+       ST7701_DSI(st7701, DSI_CMD2_BK1_VCOM, DSI_CMD2_BK1_VCOM_SET);
+       ST7701_DSI(st7701, DSI_CMD2_BK1_VGHSS, DSI_CMD2_BK1_VGHSS_SET);
+       ST7701_DSI(st7701, DSI_CMD2_BK1_TESTCMD, DSI_CMD2_BK1_TESTCMD_VAL);
+       ST7701_DSI(st7701, DSI_CMD2_BK1_VGLS, DSI_CMD2_BK1_VGLS_SET);
+       ST7701_DSI(st7701, DSI_CMD2_BK1_PWCTLR1, DSI_CMD2_BK1_PWCTLR1_SET);
+       ST7701_DSI(st7701, DSI_CMD2_BK1_PWCTLR2, DSI_CMD2_BK1_PWCTLR2_SET);
+       ST7701_DSI(st7701, DSI_CMD2_BK1_SPD1, DSI_CMD2_BK1_SPD1_SET);
+       ST7701_DSI(st7701, DSI_CMD2_BK1_SPD2, DSI_CMD2_BK1_SPD2_SET);
+       ST7701_DSI(st7701, DSI_CMD2_BK1_MIPISET1, DSI_CMD2_BK1_MIPISET1_SET);
+
+       /*
+        * ST7701_SPEC_V1.2 does not provide enough information about this
+        * specific command sequence, so it is taken as-is from the vendor
+        * BSP driver.
+        */
+       ST7701_DSI(st7701, 0xE0, 0x00, 0x00, 0x02);
+       ST7701_DSI(st7701, 0xE1, 0x0B, 0x00, 0x0D, 0x00, 0x0C, 0x00, 0x0E,
+                  0x00, 0x00, 0x44, 0x44);
+       ST7701_DSI(st7701, 0xE2, 0x33, 0x33, 0x44, 0x44, 0x64, 0x00, 0x66,
+                  0x00, 0x65, 0x00, 0x67, 0x00, 0x00);
+       ST7701_DSI(st7701, 0xE3, 0x00, 0x00, 0x33, 0x33);
+       ST7701_DSI(st7701, 0xE4, 0x44, 0x44);
+       ST7701_DSI(st7701, 0xE5, 0x0C, 0x78, 0x3C, 0xA0, 0x0E, 0x78, 0x3C,
+                  0xA0, 0x10, 0x78, 0x3C, 0xA0, 0x12, 0x78, 0x3C, 0xA0);
+       ST7701_DSI(st7701, 0xE6, 0x00, 0x00, 0x33, 0x33);
+       ST7701_DSI(st7701, 0xE7, 0x44, 0x44);
+       ST7701_DSI(st7701, 0xE8, 0x0D, 0x78, 0x3C, 0xA0, 0x0F, 0x78, 0x3C,
+                  0xA0, 0x11, 0x78, 0x3C, 0xA0, 0x13, 0x78, 0x3C, 0xA0);
+       ST7701_DSI(st7701, 0xEB, 0x02, 0x02, 0x39, 0x39, 0xEE, 0x44, 0x00);
+       ST7701_DSI(st7701, 0xEC, 0x00, 0x00);
+       ST7701_DSI(st7701, 0xED, 0xFF, 0xF1, 0x04, 0x56, 0x72, 0x3F, 0xFF,
+                  0xFF, 0xFF, 0xFF, 0xF3, 0x27, 0x65, 0x40, 0x1F, 0xFF);
+
+       /* disable Command2 */
+       ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
+                  0x77, 0x01, 0x00, 0x00, DSI_CMD2BKX_SEL_NONE);
+}
+
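The function above flips between the ST7701's Command2 register banks by writing DSI_CMD2BKX_SEL with a fixed preamble before each group of bank-local registers, and deselects Command2 again at the end. A small helper would make that dance explicit; this is an illustrative sketch, not code from the driver:

/* Illustrative only: select a Command2 bank (or none). The
 * 0x77, 0x01, 0x00, 0x00 preamble and the selector byte are exactly
 * what st7701_init_sequence() sends above. */
static void st7701_select_bank(struct st7701 *st7701, u8 bank)
{
        ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
                   0x77, 0x01, 0x00, 0x00, bank);
}

With such a helper, the three bank switches above would read st7701_select_bank(st7701, DSI_CMD2BK0_SEL), then DSI_CMD2BK1_SEL, and finally DSI_CMD2BKX_SEL_NONE to leave Command2.
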
+static int st7701_prepare(struct drm_panel *panel)
+{
+       struct st7701 *st7701 = panel_to_st7701(panel);
+       int ret;
+
+       gpiod_set_value(st7701->reset, 0);
+
+       ret = regulator_bulk_enable(st7701->desc->num_supplies,
+                                   st7701->supplies);
+       if (ret < 0)
+               return ret;
+       msleep(20);
+
+       gpiod_set_value(st7701->reset, 1);
+       msleep(150);
+
+       st7701_init_sequence(st7701);
+
+       return 0;
+}
+
+static int st7701_enable(struct drm_panel *panel)
+{
+       struct st7701 *st7701 = panel_to_st7701(panel);
+
+       ST7701_DSI(st7701, MIPI_DCS_SET_DISPLAY_ON, 0x00);
+       backlight_enable(st7701->backlight);
+
+       return 0;
+}
+
+static int st7701_disable(struct drm_panel *panel)
+{
+       struct st7701 *st7701 = panel_to_st7701(panel);
+
+       backlight_disable(st7701->backlight);
+       ST7701_DSI(st7701, MIPI_DCS_SET_DISPLAY_OFF, 0x00);
+
+       return 0;
+}
+
+static int st7701_unprepare(struct drm_panel *panel)
+{
+       struct st7701 *st7701 = panel_to_st7701(panel);
+
+       ST7701_DSI(st7701, MIPI_DCS_ENTER_SLEEP_MODE, 0x00);
+
+       msleep(st7701->sleep_delay);
+
+       gpiod_set_value(st7701->reset, 0);
+
+       /*
+        * During the reset period the display is blanked: it enters the
+        * blanking sequence (which takes at most 120 ms when reset starts
+        * in Sleep Out mode) and stays blank while in Sleep In mode, then
+        * returns to the default condition for hardware reset.
+        *
+        * So wait sleep_delay to make sure the reset has completed.
+        */
+       msleep(st7701->sleep_delay);
+
+       regulator_bulk_disable(st7701->desc->num_supplies, st7701->supplies);
+
+       return 0;
+}
+
+static int st7701_get_modes(struct drm_panel *panel)
+{
+       struct st7701 *st7701 = panel_to_st7701(panel);
+       const struct drm_display_mode *desc_mode = st7701->desc->mode;
+       struct drm_display_mode *mode;
+
+       mode = drm_mode_duplicate(panel->drm, desc_mode);
+       if (!mode) {
+               DRM_DEV_ERROR(&st7701->dsi->dev,
+                             "failed to add mode %ux%u@%u\n",
+                             desc_mode->hdisplay, desc_mode->vdisplay,
+                             desc_mode->vrefresh);
+               return -ENOMEM;
+       }
+
+       drm_mode_set_name(mode);
+       drm_mode_probed_add(panel->connector, mode);
+
+       panel->connector->display_info.width_mm = desc_mode->width_mm;
+       panel->connector->display_info.height_mm = desc_mode->height_mm;
+
+       return 1;
+}
+
+static const struct drm_panel_funcs st7701_funcs = {
+       .disable        = st7701_disable,
+       .unprepare      = st7701_unprepare,
+       .prepare        = st7701_prepare,
+       .enable         = st7701_enable,
+       .get_modes      = st7701_get_modes,
+};
+
+static const struct drm_display_mode ts8550b_mode = {
+       .clock          = 27500,
+
+       .hdisplay       = 480,
+       .hsync_start    = 480 + 38,
+       .hsync_end      = 480 + 38 + 12,
+       .htotal         = 480 + 38 + 12 + 12,
+
+       .vdisplay       = 854,
+       .vsync_start    = 854 + 4,
+       .vsync_end      = 854 + 4 + 8,
+       .vtotal         = 854 + 4 + 8 + 18,
+
+       .width_mm       = 69,
+       .height_mm      = 139,
+
+       .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
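The mode above does not set .vrefresh, but the refresh rate is implied by the timings: vrefresh = clock / (htotal * vtotal). A quick stand-alone check (illustrative, not driver code):

/* refresh.c - sanity-check the implied refresh rate */
#include <stdio.h>

int main(void)
{
        unsigned long clock_khz = 27500;
        unsigned int htotal = 480 + 38 + 12 + 12;       /* 542 */
        unsigned int vtotal = 854 + 4 + 8 + 18;         /* 884 */

        /* pixel clock divided by pixels per frame */
        printf("%.1f Hz\n",
               (double)clock_khz * 1000.0 / (htotal * vtotal));
        return 0;               /* prints 57.4 */
}

The same arithmetic applied to the TPG110 800x480 mode later in this series gives roughly 59.7 Hz, consistent with that table's explicit .vrefresh = 60.
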
+static const char * const ts8550b_supply_names[] = {
+       "VCC",
+       "IOVCC",
+};
+
+static const struct st7701_panel_desc ts8550b_desc = {
+       .mode = &ts8550b_mode,
+       .lanes = 2,
+       .flags = MIPI_DSI_MODE_VIDEO,
+       .format = MIPI_DSI_FMT_RGB888,
+       .supply_names = ts8550b_supply_names,
+       .num_supplies = ARRAY_SIZE(ts8550b_supply_names),
+       .panel_sleep_delay = 80, /* panel needs an extra 80 ms after sleep-out */
+};
+
+static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
+{
+       const struct st7701_panel_desc *desc;
+       struct st7701 *st7701;
+       int ret, i;
+
+       st7701 = devm_kzalloc(&dsi->dev, sizeof(*st7701), GFP_KERNEL);
+       if (!st7701)
+               return -ENOMEM;
+
+       desc = of_device_get_match_data(&dsi->dev);
+       dsi->mode_flags = desc->flags;
+       dsi->format = desc->format;
+       dsi->lanes = desc->lanes;
+
+       st7701->supplies = devm_kcalloc(&dsi->dev, desc->num_supplies,
+                                       sizeof(*st7701->supplies),
+                                       GFP_KERNEL);
+       if (!st7701->supplies)
+               return -ENOMEM;
+
+       for (i = 0; i < desc->num_supplies; i++)
+               st7701->supplies[i].supply = desc->supply_names[i];
+
+       ret = devm_regulator_bulk_get(&dsi->dev, desc->num_supplies,
+                                     st7701->supplies);
+       if (ret < 0)
+               return ret;
+
+       st7701->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
+       if (IS_ERR(st7701->reset)) {
+               DRM_DEV_ERROR(&dsi->dev, "Couldn't get our reset GPIO\n");
+               return PTR_ERR(st7701->reset);
+       }
+
+       st7701->backlight = devm_of_find_backlight(&dsi->dev);
+       if (IS_ERR(st7701->backlight))
+               return PTR_ERR(st7701->backlight);
+
+       drm_panel_init(&st7701->panel);
+
+       /*
+        * Once sleep out has been issued, the ST7701 needs to wait 120 ms
+        * before initiating new commands.
+        *
+        * On top of that, some panels might need an extra delay, so add a
+        * panel-specific delay for those cases. For now this delay is taken
+        * from the respective panel BSP drivers (for example ts8550b), as
+        * there is no valid documentation for it.
+        */
+       st7701->sleep_delay = 120 + desc->panel_sleep_delay;
+       st7701->panel.funcs = &st7701_funcs;
+       st7701->panel.dev = &dsi->dev;
+
+       ret = drm_panel_add(&st7701->panel);
+       if (ret < 0)
+               return ret;
+
+       mipi_dsi_set_drvdata(dsi, st7701);
+       st7701->dsi = dsi;
+       st7701->desc = desc;
+
+       return mipi_dsi_attach(dsi);
+}
+
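One hedged note on the tail of st7701_dsi_probe(): if mipi_dsi_attach() fails, the panel registered by drm_panel_add() a few lines earlier is never removed. An illustrative, more defensive ending would unwind the registration on failure:

        ret = mipi_dsi_attach(dsi);
        if (ret < 0) {
                /* unwind drm_panel_add() so the panel list stays clean */
                drm_panel_remove(&st7701->panel);
                return ret;
        }

        return 0;
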
+static int st7701_dsi_remove(struct mipi_dsi_device *dsi)
+{
+       struct st7701 *st7701 = mipi_dsi_get_drvdata(dsi);
+
+       mipi_dsi_detach(dsi);
+       drm_panel_remove(&st7701->panel);
+
+       return 0;
+}
+
+static const struct of_device_id st7701_of_match[] = {
+       { .compatible = "techstar,ts8550b", .data = &ts8550b_desc },
+       { }
+};
+MODULE_DEVICE_TABLE(of, st7701_of_match);
+
+static struct mipi_dsi_driver st7701_dsi_driver = {
+       .probe          = st7701_dsi_probe,
+       .remove         = st7701_dsi_remove,
+       .driver = {
+               .name           = "st7701",
+               .of_match_table = st7701_of_match,
+       },
+};
+module_mipi_dsi_driver(st7701_dsi_driver);
+
+MODULE_AUTHOR("Jagan Teki <jagan@amarulasolutions.com>");
+MODULE_DESCRIPTION("Sitronix ST7701 LCD Panel Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
new file mode 100644
index 0000000..5a9f8f4
--- /dev/null
@@ -0,0 +1,496 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Panel driver for the TPO TPG110 400CH LTPS TFT LCD Single Chip
+ * Digital Driver.
+ *
+ * This chip drives a TFT LCD, but it does not know what kind of
+ * display is actually connected to it, so the width and height of
+ * that display need to be supplied by the machine configuration.
+ *
+ * Author:
+ * Linus Walleij <linus.walleij@linaro.org>
+ */
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#include <linux/backlight.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+
+#define TPG110_TEST                    0x00
+#define TPG110_CHIPID                  0x01
+#define TPG110_CTRL1                   0x02
+#define TPG110_RES_MASK                        GENMASK(2, 0)
+#define TPG110_RES_800X480             0x07
+#define TPG110_RES_640X480             0x06
+#define TPG110_RES_480X272             0x05
+#define TPG110_RES_480X640             0x04
+#define TPG110_RES_480X272_D           0x01 /* Dual scan: outputs 800x480 */
+#define TPG110_RES_400X240_D           0x00 /* Dual scan: outputs 800x480 */
+#define TPG110_CTRL2                   0x03
+#define TPG110_CTRL2_PM                        BIT(0)
+#define TPG110_CTRL2_RES_PM_CTRL       BIT(7)
+
+/**
+ * struct tpg110_panel_mode - lookup struct for the supported modes
+ */
+struct tpg110_panel_mode {
+       /**
+        * @name: the name of this panel
+        */
+       const char *name;
+       /**
+        * @magic: the magic value from the detection register
+        */
+       u32 magic;
+       /**
+        * @mode: the DRM display mode for this panel
+        */
+       struct drm_display_mode mode;
+       /**
+        * @bus_flags: the DRM bus flags for this panel e.g. inverted clock
+        */
+       u32 bus_flags;
+};
+
+/**
+ * struct tpg110 - state container for the TPG110 panel
+ */
+struct tpg110 {
+       /**
+        * @dev: the container device
+        */
+       struct device *dev;
+       /**
+        * @spi: the corresponding SPI device
+        */
+       struct spi_device *spi;
+       /**
+        * @panel: the DRM panel instance for this device
+        */
+       struct drm_panel panel;
+       /**
+        * @backlight: backlight for this panel
+        */
+       struct backlight_device *backlight;
+       /**
+        * @panel_mode: the panel mode as detected
+        */
+       const struct tpg110_panel_mode *panel_mode;
+       /**
+        * @width: the width of this panel in mm
+        */
+       u32 width;
+       /**
+        * @height: the height of this panel in mm
+        */
+       u32 height;
+       /**
+        * @grestb: reset GPIO line
+        */
+       struct gpio_desc *grestb;
+};
+
+/*
+ * TPG110 modes. These are the simple modes; the dual-scan modes that
+ * take 400x240 or 480x272 in and display them as 800x480 are not
+ * listed separately.
+ */
+static const struct tpg110_panel_mode tpg110_modes[] = {
+       {
+               .name = "800x480 RGB",
+               .magic = TPG110_RES_800X480,
+               .mode = {
+                       .clock = 33200,
+                       .hdisplay = 800,
+                       .hsync_start = 800 + 40,
+                       .hsync_end = 800 + 40 + 1,
+                       .htotal = 800 + 40 + 1 + 216,
+                       .vdisplay = 480,
+                       .vsync_start = 480 + 10,
+                       .vsync_end = 480 + 10 + 1,
+                       .vtotal = 480 + 10 + 1 + 35,
+                       .vrefresh = 60,
+               },
+               .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+       },
+       {
+               .name = "640x480 RGB",
+               .magic = TPG110_RES_640X480,
+               .mode = {
+                       .clock = 25200,
+                       .hdisplay = 640,
+                       .hsync_start = 640 + 24,
+                       .hsync_end = 640 + 24 + 1,
+                       .htotal = 640 + 24 + 1 + 136,
+                       .vdisplay = 480,
+                       .vsync_start = 480 + 18,
+                       .vsync_end = 480 + 18 + 1,
+                       .vtotal = 480 + 18 + 1 + 27,
+                       .vrefresh = 60,
+               },
+               .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+       },
+       {
+               .name = "480x272 RGB",
+               .magic = TPG110_RES_480X272,
+               .mode = {
+                       .clock = 9000,
+                       .hdisplay = 480,
+                       .hsync_start = 480 + 2,
+                       .hsync_end = 480 + 2 + 1,
+                       .htotal = 480 + 2 + 1 + 43,
+                       .vdisplay = 272,
+                       .vsync_start = 272 + 2,
+                       .vsync_end = 272 + 2 + 1,
+                       .vtotal = 272 + 2 + 1 + 12,
+                       .vrefresh = 60,
+               },
+               .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+       },
+       {
+               .name = "480x640 RGB",
+               .magic = TPG110_RES_480X640,
+               .mode = {
+                       .clock = 20500,
+                       .hdisplay = 480,
+                       .hsync_start = 480 + 2,
+                       .hsync_end = 480 + 2 + 1,
+                       .htotal = 480 + 2 + 1 + 43,
+                       .vdisplay = 640,
+                       .vsync_start = 640 + 4,
+                       .vsync_end = 640 + 4 + 1,
+                       .vtotal = 640 + 4 + 1 + 8,
+                       .vrefresh = 60,
+               },
+               .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+       },
+       {
+               .name = "400x240 RGB",
+               .magic = TPG110_RES_400X240_D,
+               .mode = {
+                       .clock = 8300,
+                       .hdisplay = 400,
+                       .hsync_start = 400 + 20,
+                       .hsync_end = 400 + 20 + 1,
+                       .htotal = 400 + 20 + 1 + 108,
+                       .vdisplay = 240,
+                       .vsync_start = 240 + 2,
+                       .vsync_end = 240 + 2 + 1,
+                       .vtotal = 240 + 2 + 1 + 20,
+                       .vrefresh = 60,
+               },
+               .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+       },
+};
+
+static inline struct tpg110 *
+to_tpg110(struct drm_panel *panel)
+{
+       return container_of(panel, struct tpg110, panel);
+}
+
+static u8 tpg110_readwrite_reg(struct tpg110 *tpg, bool write,
+                              u8 address, u8 outval)
+{
+       struct spi_message m;
+       struct spi_transfer t[2];
+       u8 buf[2];
+       int ret;
+
+       spi_message_init(&m);
+       memset(t, 0, sizeof(t));
+
+       if (write) {
+               /*
+                * Clear address bits 0 and 1 when writing, just to be sure.
+                * The actual bit indicating a write here is bit 1; bit 0 is
+                * just surplus to pad the command up to 8 bits.
+                */
+               buf[0] = address << 2;
+               buf[0] &= ~0x03;
+               buf[1] = outval;
+
+               t[0].bits_per_word = 8;
+               t[0].tx_buf = &buf[0];
+               t[0].len = 1;
+
+               t[1].tx_buf = &buf[1];
+               t[1].len = 1;
+               t[1].bits_per_word = 8;
+       } else {
+               /* Set address bit 0 to 1 to read */
+               buf[0] = address << 1;
+               buf[0] |= 0x01;
+
+               /*
+                * The last bit/clock is a high-impedance (Hi-Z) turnaround
+                * cycle, so we send only 7 bits here; the 8th clock is the
+                * turnaround itself.
+                */
+               t[0].bits_per_word = 7;
+               t[0].tx_buf = &buf[0];
+               t[0].len = 1;
+
+               t[1].rx_buf = &buf[1];
+               t[1].len = 1;
+               t[1].bits_per_word = 8;
+       }
+
+       spi_message_add_tail(&t[0], &m);
+       spi_message_add_tail(&t[1], &m);
+       ret = spi_sync(tpg->spi, &m);
+       if (ret) {
+               DRM_DEV_ERROR(tpg->dev, "SPI message error %d\n", ret);
+               return ret;
+       }
+       if (write)
+               return 0;
+       /* Read */
+       return buf[1];
+}
+
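The address byte is encoded differently for the two directions: writes shift the register address up by two and keep bits 1:0 clear, while reads shift by one and set bit 0. A stand-alone sketch of the two encodings, mirroring the buf[0] math above (illustrative, not driver code):

/* addr.c - the TPG110 3-wire SPI command-byte encodings */
#include <stdio.h>

static unsigned char tpg110_write_cmd(unsigned char address)
{
        /* address in bits 7:2, bits 1:0 clear => write */
        return (unsigned char)((address << 2) & ~0x03);
}

static unsigned char tpg110_read_cmd(unsigned char address)
{
        /* address in bits 7:1, bit 0 set => read (sent as 7 bits) */
        return (unsigned char)((address << 1) | 0x01);
}

int main(void)
{
        /* TPG110_CTRL1 is register 0x02 in the driver above */
        printf("write 0x02 -> 0x%02x\n", tpg110_write_cmd(0x02)); /* 0x08 */
        printf("read  0x02 -> 0x%02x\n", tpg110_read_cmd(0x02));  /* 0x05 */
        return 0;
}
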
+static u8 tpg110_read_reg(struct tpg110 *tpg, u8 address)
+{
+       return tpg110_readwrite_reg(tpg, false, address, 0);
+}
+
+static void tpg110_write_reg(struct tpg110 *tpg, u8 address, u8 outval)
+{
+       tpg110_readwrite_reg(tpg, true, address, outval);
+}
+
+static int tpg110_startup(struct tpg110 *tpg)
+{
+       u8 val;
+       int i;
+
+       /* De-assert the reset signal */
+       gpiod_set_value_cansleep(tpg->grestb, 0);
+       usleep_range(1000, 2000);
+       DRM_DEV_DEBUG(tpg->dev, "de-asserted GRESTB\n");
+
+       /* Test display communication */
+       tpg110_write_reg(tpg, TPG110_TEST, 0x55);
+       val = tpg110_read_reg(tpg, TPG110_TEST);
+       if (val != 0x55) {
+               DRM_DEV_ERROR(tpg->dev, "failed communication test\n");
+               return -ENODEV;
+       }
+
+       val = tpg110_read_reg(tpg, TPG110_CHIPID);
+       DRM_DEV_INFO(tpg->dev, "TPG110 chip ID: %d version: %d\n",
+                val >> 4, val & 0x0f);
+
+       /* Show display resolution */
+       val = tpg110_read_reg(tpg, TPG110_CTRL1);
+       val &= TPG110_RES_MASK;
+       switch (val) {
+       case TPG110_RES_400X240_D:
+               DRM_DEV_INFO(tpg->dev,
+                        "IN 400x240 RGB -> OUT 800x480 RGB (dual scan)\n");
+               break;
+       case TPG110_RES_480X272_D:
+               DRM_DEV_INFO(tpg->dev,
+                        "IN 480x272 RGB -> OUT 800x480 RGB (dual scan)\n");
+               break;
+       case TPG110_RES_480X640:
+               DRM_DEV_INFO(tpg->dev, "480x640 RGB\n");
+               break;
+       case TPG110_RES_480X272:
+               DRM_DEV_INFO(tpg->dev, "480x272 RGB\n");
+               break;
+       case TPG110_RES_640X480:
+               DRM_DEV_INFO(tpg->dev, "640x480 RGB\n");
+               break;
+       case TPG110_RES_800X480:
+               DRM_DEV_INFO(tpg->dev, "800x480 RGB\n");
+               break;
+       default:
+               DRM_DEV_ERROR(tpg->dev, "ILLEGAL RESOLUTION 0x%02x\n", val);
+               break;
+       }
+
+       /* From the producer side, this is the same resolution */
+       if (val == TPG110_RES_480X272_D)
+               val = TPG110_RES_480X272;
+
+       for (i = 0; i < ARRAY_SIZE(tpg110_modes); i++) {
+               const struct tpg110_panel_mode *pm;
+
+               pm = &tpg110_modes[i];
+               if (pm->magic == val) {
+                       tpg->panel_mode = pm;
+                       break;
+               }
+       }
+       if (i == ARRAY_SIZE(tpg110_modes)) {
+               DRM_DEV_ERROR(tpg->dev, "unsupported mode (%02x) detected\n",
+                       val);
+               return -ENODEV;
+       }
+
+       val = tpg110_read_reg(tpg, TPG110_CTRL2);
+       DRM_DEV_INFO(tpg->dev, "resolution and standby are controlled by %s\n",
+                (val & TPG110_CTRL2_RES_PM_CTRL) ? "software" : "hardware");
+       /* Take control over resolution and standby */
+       val |= TPG110_CTRL2_RES_PM_CTRL;
+       tpg110_write_reg(tpg, TPG110_CTRL2, val);
+
+       return 0;
+}
+
+static int tpg110_disable(struct drm_panel *panel)
+{
+       struct tpg110 *tpg = to_tpg110(panel);
+       u8 val;
+
+       /* Put chip into standby */
+       val = tpg110_read_reg(tpg, TPG110_CTRL2);
+       val &= ~TPG110_CTRL2_PM;
+       tpg110_write_reg(tpg, TPG110_CTRL2, val);
+
+       backlight_disable(tpg->backlight);
+
+       return 0;
+}
+
+static int tpg110_enable(struct drm_panel *panel)
+{
+       struct tpg110 *tpg = to_tpg110(panel);
+       u8 val;
+
+       backlight_enable(tpg->backlight);
+
+       /* Take chip out of standby */
+       val = tpg110_read_reg(tpg, TPG110_CTRL2);
+       val |= TPG110_CTRL2_PM;
+       tpg110_write_reg(tpg, TPG110_CTRL2, val);
+
+       return 0;
+}
+
+/**
+ * tpg110_get_modes() - return the appropriate mode
+ * @panel: the panel to get the mode for
+ *
+ * This currently does not present a forest of modes, instead it
+ * presents the mode that is configured for the system under use,
+ * and which is detected by reading the registers of the display.
+ */
+static int tpg110_get_modes(struct drm_panel *panel)
+{
+       struct drm_connector *connector = panel->connector;
+       struct tpg110 *tpg = to_tpg110(panel);
+       struct drm_display_mode *mode;
+
+       strncpy(connector->display_info.name, tpg->panel_mode->name,
+               DRM_DISPLAY_INFO_LEN);
+       connector->display_info.width_mm = tpg->width;
+       connector->display_info.height_mm = tpg->height;
+       connector->display_info.bus_flags = tpg->panel_mode->bus_flags;
+
+       mode = drm_mode_duplicate(panel->drm, &tpg->panel_mode->mode);
+       drm_mode_set_name(mode);
+       mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+
+       mode->width_mm = tpg->width;
+       mode->height_mm = tpg->height;
+
+       drm_mode_probed_add(connector, mode);
+
+       return 1;
+}
+
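Two hedged hardening notes on tpg110_get_modes(): strncpy() does not NUL-terminate when the source is as long as the buffer, and drm_mode_duplicate() can return NULL on allocation failure, which drm_mode_set_name() would then dereference. An illustrative, more defensive spelling of those two steps (not the code as merged):

        strncpy(connector->display_info.name, tpg->panel_mode->name,
                DRM_DISPLAY_INFO_LEN - 1);
        connector->display_info.name[DRM_DISPLAY_INFO_LEN - 1] = '\0';

        mode = drm_mode_duplicate(panel->drm, &tpg->panel_mode->mode);
        if (!mode)
                return 0;       /* report no modes on allocation failure */
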
+static const struct drm_panel_funcs tpg110_drm_funcs = {
+       .disable = tpg110_disable,
+       .enable = tpg110_enable,
+       .get_modes = tpg110_get_modes,
+};
+
+static int tpg110_probe(struct spi_device *spi)
+{
+       struct device *dev = &spi->dev;
+       struct device_node *np = dev->of_node;
+       struct tpg110 *tpg;
+       int ret;
+
+       tpg = devm_kzalloc(dev, sizeof(*tpg), GFP_KERNEL);
+       if (!tpg)
+               return -ENOMEM;
+       tpg->dev = dev;
+
+       /* We get the physical display dimensions from the DT */
+       ret = of_property_read_u32(np, "width-mm", &tpg->width);
+       if (ret)
+               DRM_DEV_ERROR(dev, "no panel width specified\n");
+       ret = of_property_read_u32(np, "height-mm", &tpg->height);
+       if (ret)
+               DRM_DEV_ERROR(dev, "no panel height specified\n");
+
+       /* Look for some optional backlight */
+       tpg->backlight = devm_of_find_backlight(dev);
+       if (IS_ERR(tpg->backlight))
+               return PTR_ERR(tpg->backlight);
+
+       /* This asserts the GRESTB signal, putting the display into reset */
+       tpg->grestb = devm_gpiod_get(dev, "grestb", GPIOD_OUT_HIGH);
+       if (IS_ERR(tpg->grestb)) {
+               DRM_DEV_ERROR(dev, "no GRESTB GPIO\n");
+               return -ENODEV;
+       }
+
+       spi->bits_per_word = 8;
+       spi->mode |= SPI_3WIRE_HIZ;
+       ret = spi_setup(spi);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dev, "spi setup failed.\n");
+               return ret;
+       }
+       tpg->spi = spi;
+
+       ret = tpg110_startup(tpg);
+       if (ret)
+               return ret;
+
+       drm_panel_init(&tpg->panel);
+       tpg->panel.dev = dev;
+       tpg->panel.funcs = &tpg110_drm_funcs;
+       spi_set_drvdata(spi, tpg);
+
+       return drm_panel_add(&tpg->panel);
+}
+
+static int tpg110_remove(struct spi_device *spi)
+{
+       struct tpg110 *tpg = spi_get_drvdata(spi);
+
+       drm_panel_remove(&tpg->panel);
+       return 0;
+}
+
+static const struct of_device_id tpg110_match[] = {
+       { .compatible = "tpo,tpg110", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, tpg110_match);
+
+static struct spi_driver tpg110_driver = {
+       .probe          = tpg110_probe,
+       .remove         = tpg110_remove,
+       .driver         = {
+               .name   = "tpo-tpg110-panel",
+               .of_match_table = tpg110_match,
+       },
+};
+module_spi_driver(tpg110_driver);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("TPO TPG110 panel driver");
+MODULE_LICENSE("GPL v2");
index 33e0483d62ae93360314f37d1ae43db6fa1ea540..a8958c201a885eb1f3968bbc600af74fbfce3e4d 100644
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_of.h>
-#include <drm/drm_bridge.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
 
 #include "pl111_drm.h"
 #include "pl111_versatile.h"
index 33a7d0c434b7f6b91d226759b393e176a1837df0..fc59d42b31af6554084f8f0fb7c7d4b364c9097d 100644
@@ -2,6 +2,6 @@
 # Makefile for the drm device driver.  This driver provides support for the
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
-qxl-y := qxl_drv.o qxl_kms.o qxl_display.o qxl_ttm.o qxl_fb.o qxl_object.o qxl_gem.o qxl_cmd.o qxl_image.o qxl_draw.o qxl_debugfs.o qxl_irq.o qxl_dumb.o qxl_ioctl.o qxl_release.o qxl_prime.o
+qxl-y := qxl_drv.o qxl_kms.o qxl_display.o qxl_ttm.o qxl_object.o qxl_gem.o qxl_cmd.o qxl_image.o qxl_draw.o qxl_debugfs.o qxl_irq.o qxl_dumb.o qxl_ioctl.o qxl_release.o qxl_prime.o
 
 obj-$(CONFIG_DRM_QXL)+= qxl.o
index dffc5093ff16b96cbd8f6f4ee0b99670f8032303..0a2e51af123089c50504cf1692cd588e0ba41b89 100644
@@ -25,6 +25,8 @@
 
 /* QXL cmd/ring handling */
 
+#include <drm/drm_util.h>
+
 #include "qxl_drv.h"
 #include "qxl_object.h"
 
@@ -372,25 +374,25 @@ void qxl_io_flush_surfaces(struct qxl_device *qdev)
 void qxl_io_destroy_primary(struct qxl_device *qdev)
 {
        wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
-       qdev->primary_created = false;
+       qdev->primary_bo->is_primary = false;
+       drm_gem_object_put_unlocked(&qdev->primary_bo->gem_base);
+       qdev->primary_bo = NULL;
 }
 
-void qxl_io_create_primary(struct qxl_device *qdev,
-                          unsigned int offset, struct qxl_bo *bo)
+void qxl_io_create_primary(struct qxl_device *qdev, struct qxl_bo *bo)
 {
        struct qxl_surface_create *create;
 
+       if (WARN_ON(qdev->primary_bo))
+               return;
+
        DRM_DEBUG_DRIVER("qdev %p, ram_header %p\n", qdev, qdev->ram_header);
        create = &qdev->ram_header->create_surface;
        create->format = bo->surf.format;
        create->width = bo->surf.width;
        create->height = bo->surf.height;
        create->stride = bo->surf.stride;
-       if (bo->shadow) {
-               create->mem = qxl_bo_physical_address(qdev, bo->shadow, offset);
-       } else {
-               create->mem = qxl_bo_physical_address(qdev, bo, offset);
-       }
+       create->mem = qxl_bo_physical_address(qdev, bo, 0);
 
        DRM_DEBUG_DRIVER("mem = %llx, from %p\n", create->mem, bo->kptr);
 
@@ -398,7 +400,9 @@ void qxl_io_create_primary(struct qxl_device *qdev,
        create->type = QXL_SURF_TYPE_PRIMARY;
 
        wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
-       qdev->primary_created = true;
+       qdev->primary_bo = bo;
+       qdev->primary_bo->is_primary = true;
+       drm_gem_object_get(&qdev->primary_bo->gem_base);
 }
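The reworked pair above makes qdev->primary_bo an owning pointer: qxl_io_create_primary() takes a GEM reference when it publishes the BO, and qxl_io_destroy_primary() drops it and clears the pointer. A stand-alone sketch of that owning-pointer pattern (illustrative, not kernel code):

/* own.c - the owning-pointer pattern used for qdev->primary_bo */
#include <stdio.h>
#include <stdlib.h>

struct bo { int refcount; };

static void bo_get(struct bo *bo) { bo->refcount++; }
static void bo_put(struct bo *bo)
{
        if (--bo->refcount == 0)
                free(bo);
}

static struct bo *primary;      /* like qdev->primary_bo */

static void create_primary(struct bo *bo)
{
        primary = bo;
        bo_get(bo);             /* the global pointer owns a reference */
}

static void destroy_primary(void)
{
        bo_put(primary);        /* drop the ownership reference */
        primary = NULL;
}

int main(void)
{
        struct bo *bo = calloc(1, sizeof(*bo));

        bo_get(bo);             /* caller's own reference */
        create_primary(bo);
        destroy_primary();
        bo_put(bo);             /* last put frees the object */
        printf("balanced\n");
        return 0;
}
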
 
 void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
@@ -458,8 +462,7 @@ void qxl_surface_id_dealloc(struct qxl_device *qdev,
 }
 
 int qxl_hw_surface_alloc(struct qxl_device *qdev,
-                        struct qxl_bo *surf,
-                        struct ttm_mem_reg *new_mem)
+                        struct qxl_bo *surf)
 {
        struct qxl_surface_cmd *cmd;
        struct qxl_release *release;
@@ -485,16 +488,7 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
        cmd->u.surface_create.width = surf->surf.width;
        cmd->u.surface_create.height = surf->surf.height;
        cmd->u.surface_create.stride = surf->surf.stride;
-       if (new_mem) {
-               int slot_id = surf->type == QXL_GEM_DOMAIN_VRAM ? qdev->main_mem_slot : qdev->surfaces_mem_slot;
-               struct qxl_memslot *slot = &(qdev->mem_slots[slot_id]);
-
-               /* TODO - need to hold one of the locks to read tbo.offset */
-               cmd->u.surface_create.data = slot->high_bits;
-
-               cmd->u.surface_create.data |= (new_mem->start << PAGE_SHIFT) + surf->tbo.bdev->man[new_mem->mem_type].gpu_offset;
-       } else
-               cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0);
+       cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0);
        cmd->surface_id = surf->surface_id;
        qxl_release_unmap(qdev, release, &cmd->release_info);
 
index 72a1784dae5481db9a2c289ec81b36ff990b436a..08c725544a2fc5fe6357abeb0e6b131e35e39070 100644
  */
 
 #include <linux/crc32.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_plane_helper.h>
-#include <drm/drm_atomic_helper.h>
 #include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "qxl_drv.h"
 #include "qxl_object.h"
@@ -48,8 +48,8 @@ static int qxl_alloc_client_monitors_config(struct qxl_device *qdev,
        }
        if (!qdev->client_monitors_config) {
                qdev->client_monitors_config = kzalloc(
-                               sizeof(struct qxl_monitors_config) +
-                               sizeof(struct qxl_head) * count, GFP_KERNEL);
+                               struct_size(qdev->client_monitors_config,
+                               heads, count), GFP_KERNEL);
                if (!qdev->client_monitors_config)
                        return -ENOMEM;
        }
@@ -80,10 +80,10 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
                DRM_DEBUG_KMS("no client monitors configured\n");
                return status;
        }
-       if (num_monitors > qdev->monitors_config->max_allowed) {
+       if (num_monitors > qxl_num_crtc) {
                DRM_DEBUG_KMS("client monitors list will be truncated: %d < %d\n",
-                             qdev->monitors_config->max_allowed, num_monitors);
-               num_monitors = qdev->monitors_config->max_allowed;
+                             qxl_num_crtc, num_monitors);
+               num_monitors = qxl_num_crtc;
        } else {
                num_monitors = qdev->rom->client_monitors_config.count;
        }
@@ -96,8 +96,7 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
                return status;
        }
        /* we copy max from the client but it isn't used */
-       qdev->client_monitors_config->max_allowed =
-                               qdev->monitors_config->max_allowed;
+       qdev->client_monitors_config->max_allowed = qxl_num_crtc;
        for (i = 0 ; i < qdev->client_monitors_config->count ; ++i) {
                struct qxl_urect *c_rect =
                        &qdev->rom->client_monitors_config.heads[i];
@@ -191,20 +190,63 @@ void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
        }
 }
 
-static int qxl_add_monitors_config_modes(struct drm_connector *connector,
-                                         unsigned *pwidth,
-                                         unsigned *pheight)
+static int qxl_check_mode(struct qxl_device *qdev,
+                         unsigned int width,
+                         unsigned int height)
+{
+       unsigned int stride;
+       unsigned int size;
+
+       if (check_mul_overflow(width, 4u, &stride))
+               return -EINVAL;
+       if (check_mul_overflow(stride, height, &size))
+               return -EINVAL;
+       if (size > qdev->vram_size)
+               return -ENOMEM;
+       return 0;
+}
+
+static int qxl_check_framebuffer(struct qxl_device *qdev,
+                                struct qxl_bo *bo)
+{
+       return qxl_check_mode(qdev, bo->surf.width, bo->surf.height);
+}
+
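qxl_check_mode() guards the stride and size products against unsigned overflow before comparing with the VRAM size; without the checks a huge mode could wrap around to a small size and pass. A stand-alone sketch of the same guard, using the compiler builtin that the kernel's check_mul_overflow() wraps on modern GCC/Clang (illustrative, not kernel code):

/* fit.c - overflow-safe framebuffer size check */
#include <stdio.h>

static int size_for_mode(unsigned int width, unsigned int height,
                         unsigned int vram_size, unsigned int *out)
{
        unsigned int stride, size;

        if (__builtin_mul_overflow(width, 4u, &stride))
                return -1;      /* stride computation overflowed */
        if (__builtin_mul_overflow(stride, height, &size))
                return -1;      /* size computation overflowed */
        if (size > vram_size)
                return -1;      /* mode does not fit in VRAM */
        *out = size;
        return 0;
}

int main(void)
{
        unsigned int size;

        if (size_for_mode(1920, 1080, 16u << 20, &size) == 0)
                printf("1920x1080 needs %u bytes\n", size); /* 8294400 */
        return 0;
}
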
+static int qxl_add_mode(struct drm_connector *connector,
+                       unsigned int width,
+                       unsigned int height,
+                       bool preferred)
+{
+       struct drm_device *dev = connector->dev;
+       struct qxl_device *qdev = dev->dev_private;
+       struct drm_display_mode *mode = NULL;
+       int rc;
+
+       rc = qxl_check_mode(qdev, width, height);
+       if (rc != 0)
+               return 0;
+
+       mode = drm_cvt_mode(dev, width, height, 60, false, false, false);
+       if (preferred)
+               mode->type |= DRM_MODE_TYPE_PREFERRED;
+       mode->hdisplay = width;
+       mode->vdisplay = height;
+       drm_mode_set_name(mode);
+       drm_mode_probed_add(connector, mode);
+       return 1;
+}
+
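One hedged note on qxl_add_mode(): drm_cvt_mode() allocates a mode and can return NULL, which the code above would then dereference. An illustrative, more defensive middle section (not the code as merged):

        mode = drm_cvt_mode(dev, width, height, 60, false, false, false);
        if (!mode)
                return 0;       /* treat allocation failure as no mode added */
        if (preferred)
                mode->type |= DRM_MODE_TYPE_PREFERRED;
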
+static int qxl_add_monitors_config_modes(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
        struct qxl_device *qdev = dev->dev_private;
        struct qxl_output *output = drm_connector_to_qxl_output(connector);
        int h = output->index;
-       struct drm_display_mode *mode = NULL;
        struct qxl_head *head;
 
        if (!qdev->monitors_config)
                return 0;
-       if (h >= qdev->monitors_config->max_allowed)
+       if (h >= qxl_num_crtc)
                return 0;
        if (!qdev->client_monitors_config)
                return 0;
@@ -214,60 +256,28 @@ static int qxl_add_monitors_config_modes(struct drm_connector *connector,
        head = &qdev->client_monitors_config->heads[h];
        DRM_DEBUG_KMS("head %d is %dx%d\n", h, head->width, head->height);
 
-       mode = drm_cvt_mode(dev, head->width, head->height, 60, false, false,
-                           false);
-       mode->type |= DRM_MODE_TYPE_PREFERRED;
-       mode->hdisplay = head->width;
-       mode->vdisplay = head->height;
-       drm_mode_set_name(mode);
-       *pwidth = head->width;
-       *pheight = head->height;
-       drm_mode_probed_add(connector, mode);
-       /* remember the last custom size for mode validation */
-       qdev->monitors_config_width = mode->hdisplay;
-       qdev->monitors_config_height = mode->vdisplay;
-       return 1;
+       return qxl_add_mode(connector, head->width, head->height, true);
 }
 
 static struct mode_size {
        int w;
        int h;
-} common_modes[] = {
-       { 640,  480},
+} extra_modes[] = {
        { 720,  480},
-       { 800,  600},
-       { 848,  480},
-       {1024,  768},
        {1152,  768},
-       {1280,  720},
-       {1280,  800},
        {1280,  854},
-       {1280,  960},
-       {1280, 1024},
-       {1440,  900},
-       {1400, 1050},
-       {1680, 1050},
-       {1600, 1200},
-       {1920, 1080},
-       {1920, 1200}
 };
 
-static int qxl_add_common_modes(struct drm_connector *connector,
-                                unsigned int pwidth,
-                                unsigned int pheight)
+static int qxl_add_extra_modes(struct drm_connector *connector)
 {
-       struct drm_device *dev = connector->dev;
-       struct drm_display_mode *mode = NULL;
-       int i;
+       int i, ret = 0;
 
-       for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
-               mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h,
-                                   60, false, false, false);
-               if (common_modes[i].w == pwidth && common_modes[i].h == pheight)
-                       mode->type |= DRM_MODE_TYPE_PREFERRED;
-               drm_mode_probed_add(connector, mode);
-       }
-       return i - 1;
+       for (i = 0; i < ARRAY_SIZE(extra_modes); i++)
+               ret += qxl_add_mode(connector,
+                                   extra_modes[i].w,
+                                   extra_modes[i].h,
+                                   false);
+       return ret;
 }
 
 static void qxl_send_monitors_config(struct qxl_device *qdev)
@@ -302,13 +312,12 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc,
        struct qxl_head head;
        int oldcount, i = qcrtc->index;
 
-       if (!qdev->primary_created) {
+       if (!qdev->primary_bo) {
                DRM_DEBUG_KMS("no primary surface, skip (%s)\n", reason);
                return;
        }
 
-       if (!qdev->monitors_config ||
-           qdev->monitors_config->max_allowed <= i)
+       if (!qdev->monitors_config || qxl_num_crtc <= i)
                return;
 
        head.id = i;
@@ -323,6 +332,8 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc,
                head.y = crtc->y;
                if (qdev->monitors_config->count < i + 1)
                        qdev->monitors_config->count = i + 1;
+               if (qdev->primary_bo == qdev->dumb_shadow_bo)
+                       head.x += qdev->dumb_heads[i].x;
        } else if (i > 0) {
                head.width = 0;
                head.height = 0;
@@ -348,9 +359,10 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc,
        if (oldcount != qdev->monitors_config->count)
                DRM_DEBUG_KMS("active heads %d -> %d (%d total)\n",
                              oldcount, qdev->monitors_config->count,
-                             qdev->monitors_config->max_allowed);
+                             qxl_num_crtc);
 
        qdev->monitors_config->heads[i] = head;
+       qdev->monitors_config->max_allowed = qxl_num_crtc;
        qxl_send_monitors_config(qdev);
 }
 
@@ -401,13 +413,15 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
        struct qxl_device *qdev = fb->dev->dev_private;
        struct drm_clip_rect norect;
        struct qxl_bo *qobj;
+       bool is_primary;
        int inc = 1;
 
        drm_modeset_lock_all(fb->dev);
 
        qobj = gem_to_qxl_bo(fb->obj[0]);
        /* if we aren't primary surface ignore this */
-       if (!qobj->is_primary) {
+       is_primary = qobj->shadow ? qobj->shadow->is_primary : qobj->is_primary;
+       if (!is_primary) {
                drm_modeset_unlock_all(fb->dev);
                return 0;
        }
@@ -424,7 +438,7 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
        }
 
        qxl_draw_dirty_fb(qdev, fb, qobj, flags, color,
-                         clips, num_clips, inc);
+                         clips, num_clips, inc, 0);
 
        drm_modeset_unlock_all(fb->dev);
 
@@ -466,12 +480,7 @@ static int qxl_primary_atomic_check(struct drm_plane *plane,
 
        bo = gem_to_qxl_bo(state->fb->obj[0]);
 
-       if (bo->surf.stride * bo->surf.height > qdev->vram_size) {
-               DRM_ERROR("Mode doesn't fit in vram size (vgamem)");
-               return -EINVAL;
-       }
-
-       return 0;
+       return qxl_check_framebuffer(qdev, bo);
 }
 
 static int qxl_primary_apply_cursor(struct drm_plane *plane)
@@ -526,15 +535,14 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
 {
        struct qxl_device *qdev = plane->dev->dev_private;
        struct qxl_bo *bo = gem_to_qxl_bo(plane->state->fb->obj[0]);
-       struct qxl_bo *bo_old;
+       struct qxl_bo *bo_old, *primary;
        struct drm_clip_rect norect = {
            .x1 = 0,
            .y1 = 0,
            .x2 = plane->state->fb->width,
            .y2 = plane->state->fb->height
        };
-       int ret;
-       bool same_shadow = false;
+       uint32_t dumb_shadow_offset = 0;
 
        if (old_state->fb) {
                bo_old = gem_to_qxl_bo(old_state->fb->obj[0]);
@@ -542,32 +550,21 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
                bo_old = NULL;
        }
 
-       if (bo == bo_old)
-               return;
+       primary = bo->shadow ? bo->shadow : bo;
 
-       if (bo_old && bo_old->shadow && bo->shadow &&
-           bo_old->shadow == bo->shadow) {
-               same_shadow = true;
-       }
-
-       if (bo_old && bo_old->is_primary) {
-               if (!same_shadow)
+       if (!primary->is_primary) {
+               if (qdev->primary_bo)
                        qxl_io_destroy_primary(qdev);
-               bo_old->is_primary = false;
-
-               ret = qxl_primary_apply_cursor(plane);
-               if (ret)
-                       DRM_ERROR(
-                       "could not set cursor after creating primary");
+               qxl_io_create_primary(qdev, primary);
+               qxl_primary_apply_cursor(plane);
        }
 
-       if (!bo->is_primary) {
-               if (!same_shadow)
-                       qxl_io_create_primary(qdev, 0, bo);
-               bo->is_primary = true;
-       }
+       if (bo->is_dumb)
+               dumb_shadow_offset =
+                       qdev->dumb_heads[plane->state->crtc->index].x;
 
-       qxl_draw_dirty_fb(qdev, plane->state->fb, bo, 0, 0, &norect, 1, 1);
+       qxl_draw_dirty_fb(qdev, plane->state->fb, bo, 0, 0, &norect, 1, 1,
+                         dumb_shadow_offset);
 }
 
 static void qxl_primary_atomic_disable(struct drm_plane *plane,
@@ -723,12 +720,68 @@ static void qxl_cursor_atomic_disable(struct drm_plane *plane,
        qxl_release_fence_buffer_objects(release);
 }
 
+static void qxl_update_dumb_head(struct qxl_device *qdev,
+                                int index, struct qxl_bo *bo)
+{
+       uint32_t width, height;
+
+       if (index >= qdev->monitors_config->max_allowed)
+               return;
+
+       if (bo && bo->is_dumb) {
+               width = bo->surf.width;
+               height = bo->surf.height;
+       } else {
+               width = 0;
+               height = 0;
+       }
+
+       if (qdev->dumb_heads[index].width == width &&
+           qdev->dumb_heads[index].height == height)
+               return;
+
+       DRM_DEBUG("#%d: %dx%d -> %dx%d\n", index,
+                 qdev->dumb_heads[index].width,
+                 qdev->dumb_heads[index].height,
+                 width, height);
+       qdev->dumb_heads[index].width = width;
+       qdev->dumb_heads[index].height = height;
+}
+
+static void qxl_calc_dumb_shadow(struct qxl_device *qdev,
+                                struct qxl_surface *surf)
+{
+       struct qxl_head *head;
+       int i;
+
+       memset(surf, 0, sizeof(*surf));
+       for (i = 0; i < qdev->monitors_config->max_allowed; i++) {
+               head = qdev->dumb_heads + i;
+               head->x = surf->width;
+               surf->width += head->width;
+               if (surf->height < head->height)
+                       surf->height = head->height;
+       }
+       if (surf->width < 64)
+               surf->width = 64;
+       if (surf->height < 64)
+               surf->height = 64;
+       surf->format = SPICE_SURFACE_FMT_32_xRGB;
+       surf->stride = surf->width * 4;
+
+       if (!qdev->dumb_shadow_bo ||
+           qdev->dumb_shadow_bo->surf.width != surf->width ||
+           qdev->dumb_shadow_bo->surf.height != surf->height)
+               DRM_DEBUG("%dx%d\n", surf->width, surf->height);
+}
+
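qxl_calc_dumb_shadow() packs all heads into one shadow surface laid out left to right: each head's x offset is the running width, and the surface is as tall as the tallest head (with a 64x64 floor and a 4-byte-per-pixel stride). A stand-alone sketch of the packing rule (illustrative, not kernel code):

/* pack.c - the dumb-shadow head layout */
#include <stdio.h>

struct head { unsigned int width, height, x; };

int main(void)
{
        struct head heads[2] = { { 1024, 768, 0 }, { 1024, 768, 0 } };
        unsigned int width = 0, height = 0;
        int i;

        for (i = 0; i < 2; i++) {
                heads[i].x = width;     /* running horizontal offset */
                width += heads[i].width;
                if (height < heads[i].height)
                        height = heads[i].height;
        }
        printf("shadow %ux%u, head1 at x=%u\n", width, height, heads[1].x);
        return 0;               /* prints: shadow 2048x768, head1 at x=1024 */
}

This is why qxl_crtc_update_monitors_config() above adds qdev->dumb_heads[i].x to head.x when the primary is the shared shadow BO: each CRTC scans out its own horizontal slice of the packed surface.
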
 static int qxl_plane_prepare_fb(struct drm_plane *plane,
                                struct drm_plane_state *new_state)
 {
        struct qxl_device *qdev = plane->dev->dev_private;
        struct drm_gem_object *obj;
-       struct qxl_bo *user_bo, *old_bo = NULL;
+       struct qxl_bo *user_bo;
+       struct qxl_surface surf;
        int ret;
 
        if (!new_state->fb)
@@ -738,28 +791,30 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
        user_bo = gem_to_qxl_bo(obj);
 
        if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
-           user_bo->is_dumb && !user_bo->shadow) {
-               if (plane->state->fb) {
-                       obj = plane->state->fb->obj[0];
-                       old_bo = gem_to_qxl_bo(obj);
+           user_bo->is_dumb) {
+               qxl_update_dumb_head(qdev, new_state->crtc->index,
+                                    user_bo);
+               qxl_calc_dumb_shadow(qdev, &surf);
+               if (!qdev->dumb_shadow_bo ||
+                   qdev->dumb_shadow_bo->surf.width  != surf.width ||
+                   qdev->dumb_shadow_bo->surf.height != surf.height) {
+                       if (qdev->dumb_shadow_bo) {
+                               drm_gem_object_put_unlocked
+                                       (&qdev->dumb_shadow_bo->gem_base);
+                               qdev->dumb_shadow_bo = NULL;
+                       }
+                       qxl_bo_create(qdev, surf.height * surf.stride,
+                                     true, true, QXL_GEM_DOMAIN_SURFACE, &surf,
+                                     &qdev->dumb_shadow_bo);
                }
-               if (old_bo && old_bo->shadow &&
-                   user_bo->gem_base.size == old_bo->gem_base.size &&
-                   plane->state->crtc     == new_state->crtc &&
-                   plane->state->crtc_w   == new_state->crtc_w &&
-                   plane->state->crtc_h   == new_state->crtc_h &&
-                   plane->state->src_x    == new_state->src_x &&
-                   plane->state->src_y    == new_state->src_y &&
-                   plane->state->src_w    == new_state->src_w &&
-                   plane->state->src_h    == new_state->src_h &&
-                   plane->state->rotation == new_state->rotation &&
-                   plane->state->zpos     == new_state->zpos) {
-                       drm_gem_object_get(&old_bo->shadow->gem_base);
-                       user_bo->shadow = old_bo->shadow;
-               } else {
-                       qxl_bo_create(qdev, user_bo->gem_base.size,
-                                     true, true, QXL_GEM_DOMAIN_VRAM, NULL,
-                                     &user_bo->shadow);
+               if (user_bo->shadow != qdev->dumb_shadow_bo) {
+                       if (user_bo->shadow) {
+                               drm_gem_object_put_unlocked
+                                       (&user_bo->shadow->gem_base);
+                               user_bo->shadow = NULL;
+                       }
+                       drm_gem_object_get(&qdev->dumb_shadow_bo->gem_base);
+                       user_bo->shadow = qdev->dumb_shadow_bo;
                }
        }
 
@@ -788,7 +843,7 @@ static void qxl_plane_cleanup_fb(struct drm_plane *plane,
        user_bo = gem_to_qxl_bo(obj);
        qxl_bo_unpin(user_bo);
 
-       if (user_bo->shadow && !user_bo->is_primary) {
+       if (old_state->fb != plane->state->fb && user_bo->shadow) {
                drm_gem_object_put_unlocked(&user_bo->shadow->gem_base);
                user_bo->shadow = NULL;
        }
@@ -925,14 +980,26 @@ free_mem:
 
 static int qxl_conn_get_modes(struct drm_connector *connector)
 {
+       struct drm_device *dev = connector->dev;
+       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_output *output = drm_connector_to_qxl_output(connector);
        unsigned int pwidth = 1024;
        unsigned int pheight = 768;
        int ret = 0;
 
-       ret = qxl_add_monitors_config_modes(connector, &pwidth, &pheight);
-       if (ret < 0)
-               return ret;
-       ret += qxl_add_common_modes(connector, pwidth, pheight);
+       if (qdev->client_monitors_config) {
+               struct qxl_head *head;
+               head = &qdev->client_monitors_config->heads[output->index];
+               if (head->width)
+                       pwidth = head->width;
+               if (head->height)
+                       pheight = head->height;
+       }
+
+       ret += drm_add_modes_noedid(connector, 8192, 8192);
+       ret += qxl_add_extra_modes(connector);
+       ret += qxl_add_monitors_config_modes(connector);
+       drm_set_preferred_mode(connector, pwidth, pheight);
        return ret;
 }
 
@@ -941,20 +1008,11 @@ static enum drm_mode_status qxl_conn_mode_valid(struct drm_connector *connector,
 {
        struct drm_device *ddev = connector->dev;
        struct qxl_device *qdev = ddev->dev_private;
-       int i;
-
-       /* TODO: is this called for user defined modes? (xrandr --add-mode)
-        * TODO: check that the mode fits in the framebuffer */
 
-       if (qdev->monitors_config_width == mode->hdisplay &&
-           qdev->monitors_config_height == mode->vdisplay)
-               return MODE_OK;
+       if (qxl_check_mode(qdev, mode->hdisplay, mode->vdisplay) != 0)
+               return MODE_BAD;
 
-       for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
-               if (common_modes[i].w == mode->hdisplay && common_modes[i].h == mode->vdisplay)
-                       return MODE_OK;
-       }
-       return MODE_BAD;
+       return MODE_OK;
 }
 
 static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)
@@ -1096,9 +1154,8 @@ int qxl_create_monitors_object(struct qxl_device *qdev)
 {
        int ret;
        struct drm_gem_object *gobj;
-       int max_allowed = qxl_num_crtc;
        int monitors_config_size = sizeof(struct qxl_monitors_config) +
-               max_allowed * sizeof(struct qxl_head);
+               qxl_num_crtc * sizeof(struct qxl_head);
 
        ret = qxl_gem_object_create(qdev, monitors_config_size, 0,
                                    QXL_GEM_DOMAIN_VRAM,
@@ -1120,7 +1177,12 @@ int qxl_create_monitors_object(struct qxl_device *qdev)
                qxl_bo_physical_address(qdev, qdev->monitors_config_bo, 0);
 
        memset(qdev->monitors_config, 0, monitors_config_size);
-       qdev->monitors_config->max_allowed = max_allowed;
+       qdev->dumb_heads = kcalloc(qxl_num_crtc, sizeof(qdev->dumb_heads[0]),
+                                  GFP_KERNEL);
+       if (!qdev->dumb_heads) {
+               qxl_destroy_monitors_object(qdev);
+               return -ENOMEM;
+       }
        return 0;
 }
 
@@ -1172,18 +1234,11 @@ int qxl_modeset_init(struct qxl_device *qdev)
        qxl_display_read_client_monitors_config(qdev);
 
        drm_mode_config_reset(&qdev->ddev);
-
-       /* primary surface must be created by this point, to allow
-        * issuing command queue commands and having them read by
-        * spice server. */
-       qxl_fbdev_init(qdev);
        return 0;
 }
 
 void qxl_modeset_fini(struct qxl_device *qdev)
 {
-       qxl_fbdev_fini(qdev);
-
        qxl_destroy_monitors_object(qdev);
        drm_mode_config_cleanup(&qdev->ddev);
 }
index c408bb83c7a92809532698b19398fd64e7a51ccc..97c3f1a95a320bc5529000ad0bbfa8a0b6a69635 100644
@@ -109,152 +109,6 @@ make_drawable(struct qxl_device *qdev, int surface, uint8_t type,
        return 0;
 }
 
-static int alloc_palette_object(struct qxl_device *qdev,
-                               struct qxl_release *release,
-                               struct qxl_bo **palette_bo)
-{
-       return qxl_alloc_bo_reserved(qdev, release,
-                                    sizeof(struct qxl_palette) + sizeof(uint32_t) * 2,
-                                    palette_bo);
-}
-
-static int qxl_palette_create_1bit(struct qxl_bo *palette_bo,
-                                  struct qxl_release *release,
-                                  const struct qxl_fb_image *qxl_fb_image)
-{
-       const struct fb_image *fb_image = &qxl_fb_image->fb_image;
-       uint32_t visual = qxl_fb_image->visual;
-       const uint32_t *pseudo_palette = qxl_fb_image->pseudo_palette;
-       struct qxl_palette *pal;
-       int ret;
-       uint32_t fgcolor, bgcolor;
-       static uint64_t unique; /* we make no attempt to actually set this
-                                * correctly globaly, since that would require
-                                * tracking all of our palettes. */
-       ret = qxl_bo_kmap(palette_bo, (void **)&pal);
-       if (ret)
-               return ret;
-       pal->num_ents = 2;
-       pal->unique = unique++;
-       if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) {
-               /* NB: this is the only used branch currently. */
-               fgcolor = pseudo_palette[fb_image->fg_color];
-               bgcolor = pseudo_palette[fb_image->bg_color];
-       } else {
-               fgcolor = fb_image->fg_color;
-               bgcolor = fb_image->bg_color;
-       }
-       pal->ents[0] = bgcolor;
-       pal->ents[1] = fgcolor;
-       qxl_bo_kunmap(palette_bo);
-       return 0;
-}
-
-void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
-                       int stride /* filled in if 0 */)
-{
-       struct qxl_device *qdev = qxl_fb_image->qdev;
-       struct qxl_drawable *drawable;
-       struct qxl_rect rect;
-       const struct fb_image *fb_image = &qxl_fb_image->fb_image;
-       int x = fb_image->dx;
-       int y = fb_image->dy;
-       int width = fb_image->width;
-       int height = fb_image->height;
-       const char *src = fb_image->data;
-       int depth = fb_image->depth;
-       struct qxl_release *release;
-       struct qxl_image *image;
-       int ret;
-       struct qxl_drm_image *dimage;
-       struct qxl_bo *palette_bo = NULL;
-
-       if (stride == 0)
-               stride = depth * width / 8;
-
-       ret = alloc_drawable(qdev, &release);
-       if (ret)
-               return;
-
-       ret = qxl_image_alloc_objects(qdev, release,
-                                     &dimage,
-                                     height, stride);
-       if (ret)
-               goto out_free_drawable;
-
-       if (depth == 1) {
-               ret = alloc_palette_object(qdev, release, &palette_bo);
-               if (ret)
-                       goto out_free_image;
-       }
-
-       /* do a reservation run over all the objects we just allocated */
-       ret = qxl_release_reserve_list(release, true);
-       if (ret)
-               goto out_free_palette;
-
-       rect.left = x;
-       rect.right = x + width;
-       rect.top = y;
-       rect.bottom = y + height;
-
-       ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, release);
-       if (ret) {
-               qxl_release_backoff_reserve_list(release);
-               goto out_free_palette;
-       }
-
-       ret = qxl_image_init(qdev, release, dimage,
-                            (const uint8_t *)src, 0, 0,
-                            width, height, depth, stride);
-       if (ret) {
-               qxl_release_backoff_reserve_list(release);
-               qxl_release_free(qdev, release);
-               return;
-       }
-
-       if (depth == 1) {
-               void *ptr;
-
-               ret = qxl_palette_create_1bit(palette_bo, release, qxl_fb_image);
-
-               ptr = qxl_bo_kmap_atomic_page(qdev, dimage->bo, 0);
-               image = ptr;
-               image->u.bitmap.palette =
-                       qxl_bo_physical_address(qdev, palette_bo, 0);
-               qxl_bo_kunmap_atomic_page(qdev, dimage->bo, ptr);
-       }
-
-       drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
-
-       drawable->u.copy.src_area.top = 0;
-       drawable->u.copy.src_area.bottom = height;
-       drawable->u.copy.src_area.left = 0;
-       drawable->u.copy.src_area.right = width;
-
-       drawable->u.copy.rop_descriptor = SPICE_ROPD_OP_PUT;
-       drawable->u.copy.scale_mode = 0;
-       drawable->u.copy.mask.flags = 0;
-       drawable->u.copy.mask.pos.x = 0;
-       drawable->u.copy.mask.pos.y = 0;
-       drawable->u.copy.mask.bitmap = 0;
-
-       drawable->u.copy.src_bitmap =
-               qxl_bo_physical_address(qdev, dimage->bo, 0);
-       qxl_release_unmap(qdev, release, &drawable->release_info);
-
-       qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
-       qxl_release_fence_buffer_objects(release);
-
-out_free_palette:
-       qxl_bo_unref(&palette_bo);
-out_free_image:
-       qxl_image_free_objects(qdev, dimage);
-out_free_drawable:
-       if (ret)
-               free_drawable(qdev, release);
-}
-
 /* push a draw command using the given clipping rectangles as
  * the sources from the shadow framebuffer.
  *
@@ -267,7 +121,8 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
                       struct qxl_bo *bo,
                       unsigned int flags, unsigned int color,
                       struct drm_clip_rect *clips,
-                      unsigned int num_clips, int inc)
+                      unsigned int num_clips, int inc,
+                      uint32_t dumb_shadow_offset)
 {
        /*
         * TODO: if flags & DRM_MODE_FB_DIRTY_ANNOTATE_FILL then we should
@@ -295,6 +150,9 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
        if (ret)
                return;
 
+       clips->x1 += dumb_shadow_offset;
+       clips->x2 += dumb_shadow_offset;
+
        left = clips->x1;
        right = clips->x2;
        top = clips->y1;
@@ -342,7 +200,8 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
                goto out_release_backoff;
 
        ret = qxl_image_init(qdev, release, dimage, surface_base,
-                            left, top, width, height, depth, stride);
+                            left - dumb_shadow_offset,
+                            top, width, height, depth, stride);
        qxl_bo_kunmap(bo);
        if (ret)
                goto out_release_backoff;
@@ -397,89 +256,3 @@ out_free_drawable:
                free_drawable(qdev, release);
 
 }
-
-void qxl_draw_copyarea(struct qxl_device *qdev,
-                      u32 width, u32 height,
-                      u32 sx, u32 sy,
-                      u32 dx, u32 dy)
-{
-       struct qxl_drawable *drawable;
-       struct qxl_rect rect;
-       struct qxl_release *release;
-       int ret;
-
-       ret = alloc_drawable(qdev, &release);
-       if (ret)
-               return;
-
-       /* do a reservation run over all the objects we just allocated */
-       ret = qxl_release_reserve_list(release, true);
-       if (ret)
-               goto out_free_release;
-
-       rect.left = dx;
-       rect.top = dy;
-       rect.right = dx + width;
-       rect.bottom = dy + height;
-       ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, release);
-       if (ret) {
-               qxl_release_backoff_reserve_list(release);
-               goto out_free_release;
-       }
-
-       drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
-       drawable->u.copy_bits.src_pos.x = sx;
-       drawable->u.copy_bits.src_pos.y = sy;
-       qxl_release_unmap(qdev, release, &drawable->release_info);
-
-       qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
-       qxl_release_fence_buffer_objects(release);
-
-out_free_release:
-       if (ret)
-               free_drawable(qdev, release);
-}
-
-void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
-{
-       struct qxl_device *qdev = qxl_draw_fill_rec->qdev;
-       struct qxl_rect rect = qxl_draw_fill_rec->rect;
-       uint32_t color = qxl_draw_fill_rec->color;
-       uint16_t rop = qxl_draw_fill_rec->rop;
-       struct qxl_drawable *drawable;
-       struct qxl_release *release;
-       int ret;
-
-       ret = alloc_drawable(qdev, &release);
-       if (ret)
-               return;
-
-       /* do a reservation run over all the objects we just allocated */
-       ret = qxl_release_reserve_list(release, true);
-       if (ret)
-               goto out_free_release;
-
-       ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, release);
-       if (ret) {
-               qxl_release_backoff_reserve_list(release);
-               goto out_free_release;
-       }
-
-       drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
-       drawable->u.fill.brush.type = SPICE_BRUSH_TYPE_SOLID;
-       drawable->u.fill.brush.u.color = color;
-       drawable->u.fill.rop_descriptor = rop;
-       drawable->u.fill.mask.flags = 0;
-       drawable->u.fill.mask.pos.x = 0;
-       drawable->u.fill.mask.pos.y = 0;
-       drawable->u.fill.mask.bitmap = 0;
-
-       qxl_release_unmap(qdev, release, &drawable->release_info);
-
-       qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
-       qxl_release_fence_buffer_objects(release);
-
-out_free_release:
-       if (ret)
-               free_drawable(qdev, release);
-}
index 13c8a662f9b4113cc3d60840253c66780aec712f..11a76b6c91657b512d65089aa2009d5f57825c90 100644 (file)
@@ -33,7 +33,8 @@
 
 #include <drm/drmP.h>
 #include <drm/drm.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/drm_probe_helper.h>
 #include "qxl_drv.h"
 #include "qxl_object.h"
 
@@ -93,6 +94,8 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (ret)
                goto modeset_cleanup;
 
+       drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "qxl");
+       drm_fbdev_generic_setup(&qdev->ddev, 32);
        return 0;
 
 modeset_cleanup:
@@ -242,7 +245,6 @@ static struct pci_driver qxl_pci_driver = {
 
 static struct drm_driver qxl_driver = {
        .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
-                          DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
                           DRIVER_ATOMIC,
 
        .dumb_create = qxl_mode_dumb_create,
index 13a0254b59a1a55fd09663dd32170b478b9513a3..4a0331b3ff7d078205f59a7f4fa5a0f1912c6670 100644 (file)
@@ -84,6 +84,7 @@ struct qxl_bo {
        struct ttm_bo_kmap_obj          kmap;
        unsigned int pin_count;
        void                            *kptr;
+       unsigned int                    map_count;
        int                             type;
 
        /* Constant after initialization */
@@ -130,10 +131,13 @@ struct qxl_mman {
 };
 
 struct qxl_memslot {
+       int             index;
+       const char      *name;
        uint8_t         generation;
        uint64_t        start_phys_addr;
-       uint64_t        end_phys_addr;
+       uint64_t        size;
        uint64_t        high_bits;
+       uint64_t        gpu_offset;
 };
 
 enum {
@@ -216,8 +220,6 @@ struct qxl_device {
        struct qxl_mman         mman;
        struct qxl_gem          gem;
 
-       struct drm_fb_helper    fb_helper;
-
        void *ram_physical;
 
        struct qxl_ring *release_ring;
@@ -226,16 +228,12 @@ struct qxl_device {
 
        struct qxl_ram_header *ram_header;
 
-       unsigned int primary_created:1;
-
-       struct qxl_memslot      *mem_slots;
-       uint8_t         n_mem_slots;
+       struct qxl_bo *primary_bo;
+       struct qxl_bo *dumb_shadow_bo;
+       struct qxl_head *dumb_heads;
 
-       uint8_t         main_mem_slot;
-       uint8_t         surfaces_mem_slot;
-       uint8_t         slot_id_bits;
-       uint8_t         slot_gen_bits;
-       uint64_t        va_slot_mask;
+       struct qxl_memslot main_slot;
+       struct qxl_memslot surfaces_slot;
 
        spinlock_t      release_lock;
        struct idr      release_idr;
@@ -308,30 +306,20 @@ void qxl_ring_free(struct qxl_ring *ring);
 void qxl_ring_init_hdr(struct qxl_ring *ring);
 int qxl_check_idle(struct qxl_ring *ring);
 
-static inline void *
-qxl_fb_virtual_address(struct qxl_device *qdev, unsigned long physical)
-{
-       DRM_DEBUG_DRIVER("not implemented (%lu)\n", physical);
-       return 0;
-}
-
 static inline uint64_t
 qxl_bo_physical_address(struct qxl_device *qdev, struct qxl_bo *bo,
                        unsigned long offset)
 {
-       int slot_id = bo->type == QXL_GEM_DOMAIN_VRAM ? qdev->main_mem_slot : qdev->surfaces_mem_slot;
-       struct qxl_memslot *slot = &(qdev->mem_slots[slot_id]);
+       struct qxl_memslot *slot =
+               (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+               ? &qdev->main_slot : &qdev->surfaces_slot;
+
+       WARN_ON_ONCE((bo->tbo.offset & slot->gpu_offset) != slot->gpu_offset);
 
        /* TODO - need to hold one of the locks to read tbo.offset */
-       return slot->high_bits | (bo->tbo.offset + offset);
+       return slot->high_bits | (bo->tbo.offset - slot->gpu_offset + offset);
 }
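
How the reworked qxl_bo_physical_address() composes a device address, as a runnable sketch: the placement's gpu_offset window is stripped from the TTM offset, and the slot's id/generation high bits are OR'ed on top. All numeric values here are assumptions chosen for illustration.

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t high_bits  = 0x0102000000000000ULL; /* slot id + generation */
        uint64_t gpu_offset = 0x0000020000000000ULL; /* placement window base */
        uint64_t tbo_offset = gpu_offset + 0x2000;   /* bo inside the window */
        uint64_t byte_off   = 0x10;                  /* offset inside the bo */

        /* the WARN_ON_ONCE above checks that the window bits are indeed set */
        assert((tbo_offset & gpu_offset) == gpu_offset);

        uint64_t qxl_addr = high_bits | (tbo_offset - gpu_offset + byte_off);
        assert(qxl_addr == 0x0102000000002010ULL);
        return 0;
}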
 
-/* qxl_fb.c */
-#define QXLFB_CONN_LIMIT 1
-
-int qxl_fbdev_init(struct qxl_device *qdev);
-void qxl_fbdev_fini(struct qxl_device *qdev);
-
 /* qxl_display.c */
 void qxl_display_read_client_monitors_config(struct qxl_device *qdev);
 int qxl_create_monitors_object(struct qxl_device *qdev);
@@ -392,7 +380,6 @@ void qxl_update_screen(struct qxl_device *qxl);
 /* qxl io operations (qxl_cmd.c) */
 
 void qxl_io_create_primary(struct qxl_device *qdev,
-                          unsigned int offset,
                           struct qxl_bo *bo);
 void qxl_io_destroy_primary(struct qxl_device *qdev);
 void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id);
@@ -437,22 +424,13 @@ int qxl_alloc_bo_reserved(struct qxl_device *qdev,
                          struct qxl_bo **_bo);
 /* qxl drawing commands */
 
-void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
-                       int stride /* filled in if 0 */);
-
 void qxl_draw_dirty_fb(struct qxl_device *qdev,
                       struct drm_framebuffer *fb,
                       struct qxl_bo *bo,
                       unsigned int flags, unsigned int color,
                       struct drm_clip_rect *clips,
-                      unsigned int num_clips, int inc);
-
-void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec);
-
-void qxl_draw_copyarea(struct qxl_device *qdev,
-                      u32 width, u32 height,
-                      u32 sx, u32 sy,
-                      u32 dx, u32 dy);
+                      unsigned int num_clips, int inc,
+                      uint32_t dumb_shadow_offset);
 
 void qxl_release_free(struct qxl_device *qdev,
                      struct qxl_release *release);
@@ -485,9 +463,6 @@ int qxl_gem_prime_mmap(struct drm_gem_object *obj,
 int qxl_irq_init(struct qxl_device *qdev);
 irqreturn_t qxl_irq_handler(int irq, void *arg);
 
-/* qxl_fb.c */
-bool qxl_fbdev_qobj_is_fb(struct qxl_device *qdev, struct qxl_bo *qobj);
-
 int qxl_debugfs_add_files(struct qxl_device *qdev,
                          struct drm_info_list *files,
                          unsigned int nfiles);
@@ -497,8 +472,7 @@ int qxl_surface_id_alloc(struct qxl_device *qdev,
 void qxl_surface_id_dealloc(struct qxl_device *qdev,
                            uint32_t surface_id);
 int qxl_hw_surface_alloc(struct qxl_device *qdev,
-                        struct qxl_bo *surf,
-                        struct ttm_mem_reg *mem);
+                        struct qxl_bo *surf);
 int qxl_hw_surface_dealloc(struct qxl_device *qdev,
                           struct qxl_bo *surf);
 
index e3765739c396b0e6f734b7c36c52c5bf40dcc5c0..272d19b677d8f9a10ed118915d2c65b7dcc15696 100644 (file)
@@ -59,7 +59,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
        surf.stride = pitch;
        surf.format = format;
        r = qxl_gem_object_create_with_handle(qdev, file_priv,
-                                             QXL_GEM_DOMAIN_VRAM,
+                                             QXL_GEM_DOMAIN_SURFACE,
                                              args->size, &surf, &qobj,
                                              &handle);
        if (r)
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
deleted file mode 100644 (file)
index a819d24..0000000
+++ /dev/null
@@ -1,300 +0,0 @@
-/*
- * Copyright © 2013 Red Hat
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *     David Airlie
- */
-#include <linux/module.h>
-
-#include <drm/drmP.h>
-#include <drm/drm.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
-
-#include "qxl_drv.h"
-
-#include "qxl_object.h"
-
-static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image,
-                             struct qxl_device *qdev, struct fb_info *info,
-                             const struct fb_image *image)
-{
-       qxl_fb_image->qdev = qdev;
-       if (info) {
-               qxl_fb_image->visual = info->fix.visual;
-               if (qxl_fb_image->visual == FB_VISUAL_TRUECOLOR ||
-                   qxl_fb_image->visual == FB_VISUAL_DIRECTCOLOR)
-                       memcpy(&qxl_fb_image->pseudo_palette,
-                              info->pseudo_palette,
-                              sizeof(qxl_fb_image->pseudo_palette));
-       } else {
-                /* fallback */
-               if (image->depth == 1)
-                       qxl_fb_image->visual = FB_VISUAL_MONO10;
-               else
-                       qxl_fb_image->visual = FB_VISUAL_DIRECTCOLOR;
-       }
-       if (image) {
-               memcpy(&qxl_fb_image->fb_image, image,
-                      sizeof(qxl_fb_image->fb_image));
-       }
-}
-
-static struct fb_ops qxlfb_ops = {
-       .owner = THIS_MODULE,
-       DRM_FB_HELPER_DEFAULT_OPS,
-       .fb_fillrect = drm_fb_helper_sys_fillrect,
-       .fb_copyarea = drm_fb_helper_sys_copyarea,
-       .fb_imageblit = drm_fb_helper_sys_imageblit,
-};
-
-static void qxlfb_destroy_pinned_object(struct drm_gem_object *gobj)
-{
-       struct qxl_bo *qbo = gem_to_qxl_bo(gobj);
-
-       qxl_bo_kunmap(qbo);
-       qxl_bo_unpin(qbo);
-
-       drm_gem_object_put_unlocked(gobj);
-}
-
-static int qxlfb_create_pinned_object(struct qxl_device *qdev,
-                                     const struct drm_mode_fb_cmd2 *mode_cmd,
-                                     struct drm_gem_object **gobj_p)
-{
-       struct drm_gem_object *gobj = NULL;
-       struct qxl_bo *qbo = NULL;
-       int ret;
-       int aligned_size, size;
-       int height = mode_cmd->height;
-
-       size = mode_cmd->pitches[0] * height;
-       aligned_size = ALIGN(size, PAGE_SIZE);
-       /* TODO: unallocate and reallocate surface0 for real. Hack to just
-        * have a large enough surface0 for 1024x768 Xorg 32bpp mode */
-       ret = qxl_gem_object_create(qdev, aligned_size, 0,
-                                   QXL_GEM_DOMAIN_SURFACE,
-                                   false, /* is discardable */
-                                   false, /* is kernel (false means device) */
-                                   NULL,
-                                   &gobj);
-       if (ret) {
-               pr_err("failed to allocate framebuffer (%d)\n",
-                      aligned_size);
-               return -ENOMEM;
-       }
-       qbo = gem_to_qxl_bo(gobj);
-
-       qbo->surf.width = mode_cmd->width;
-       qbo->surf.height = mode_cmd->height;
-       qbo->surf.stride = mode_cmd->pitches[0];
-       qbo->surf.format = SPICE_SURFACE_FMT_32_xRGB;
-
-       ret = qxl_bo_pin(qbo);
-       if (ret) {
-               goto out_unref;
-       }
-       ret = qxl_bo_kmap(qbo, NULL);
-
-       if (ret)
-               goto out_unref;
-
-       *gobj_p = gobj;
-       return 0;
-out_unref:
-       qxlfb_destroy_pinned_object(gobj);
-       *gobj_p = NULL;
-       return ret;
-}
-
-/*
- * FIXME
- * It should not be necessary to have a special dirty() callback for fbdev.
- */
-static int qxlfb_framebuffer_dirty(struct drm_framebuffer *fb,
-                                  struct drm_file *file_priv,
-                                  unsigned int flags, unsigned int color,
-                                  struct drm_clip_rect *clips,
-                                  unsigned int num_clips)
-{
-       struct qxl_device *qdev = fb->dev->dev_private;
-       struct fb_info *info = qdev->fb_helper.fbdev;
-       struct qxl_fb_image qxl_fb_image;
-       struct fb_image *image = &qxl_fb_image.fb_image;
-
-       /* TODO: hard coding 32 bpp */
-       int stride = fb->pitches[0];
-
-       /*
-        * we are using a shadow draw buffer, at qdev->surface0_shadow
-        */
-       image->dx = clips->x1;
-       image->dy = clips->y1;
-       image->width = clips->x2 - clips->x1;
-       image->height = clips->y2 - clips->y1;
-       image->fg_color = 0xffffffff; /* unused, just to avoid uninitialized
-                                        warnings */
-       image->bg_color = 0;
-       image->depth = 32;           /* TODO: take from somewhere? */
-       image->cmap.start = 0;
-       image->cmap.len = 0;
-       image->cmap.red = NULL;
-       image->cmap.green = NULL;
-       image->cmap.blue = NULL;
-       image->cmap.transp = NULL;
-       image->data = info->screen_base + (clips->x1 * 4) + (stride * clips->y1);
-
-       qxl_fb_image_init(&qxl_fb_image, qdev, info, NULL);
-       qxl_draw_opaque_fb(&qxl_fb_image, stride);
-
-       return 0;
-}
-
-static const struct drm_framebuffer_funcs qxlfb_fb_funcs = {
-       .destroy = drm_gem_fb_destroy,
-       .create_handle = drm_gem_fb_create_handle,
-       .dirty = qxlfb_framebuffer_dirty,
-};
-
-static int qxlfb_create(struct drm_fb_helper *helper,
-                       struct drm_fb_helper_surface_size *sizes)
-{
-       struct qxl_device *qdev =
-               container_of(helper, struct qxl_device, fb_helper);
-       struct fb_info *info;
-       struct drm_framebuffer *fb = NULL;
-       struct drm_mode_fb_cmd2 mode_cmd;
-       struct drm_gem_object *gobj = NULL;
-       struct qxl_bo *qbo = NULL;
-       int ret;
-       int bpp = sizes->surface_bpp;
-       int depth = sizes->surface_depth;
-       void *shadow;
-
-       mode_cmd.width = sizes->surface_width;
-       mode_cmd.height = sizes->surface_height;
-
-       mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 1) / 8), 64);
-       mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
-
-       ret = qxlfb_create_pinned_object(qdev, &mode_cmd, &gobj);
-       if (ret < 0)
-               return ret;
-
-       qbo = gem_to_qxl_bo(gobj);
-       DRM_DEBUG_DRIVER("%dx%d %d\n", mode_cmd.width,
-                        mode_cmd.height, mode_cmd.pitches[0]);
-
-       shadow = vmalloc(array_size(mode_cmd.pitches[0], mode_cmd.height));
-       /* TODO: what's the usual response to memory allocation errors? */
-       BUG_ON(!shadow);
-       DRM_DEBUG_DRIVER("surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n",
-                        qxl_bo_gpu_offset(qbo), qxl_bo_mmap_offset(qbo),
-                        qbo->kptr, shadow);
-
-       info = drm_fb_helper_alloc_fbi(helper);
-       if (IS_ERR(info)) {
-               ret = PTR_ERR(info);
-               goto out_unref;
-       }
-
-       info->par = helper;
-
-       fb = drm_gem_fbdev_fb_create(&qdev->ddev, sizes, 64, gobj,
-                                    &qxlfb_fb_funcs);
-       if (IS_ERR(fb)) {
-               DRM_ERROR("Failed to create framebuffer: %ld\n", PTR_ERR(fb));
-               ret = PTR_ERR(fb);
-               goto out_unref;
-       }
-
-       /* setup helper with fb data */
-       qdev->fb_helper.fb = fb;
-
-       strcpy(info->fix.id, "qxldrmfb");
-
-       drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
-
-       info->fbops = &qxlfb_ops;
-
-       /*
-        * TODO: using gobj->size in various places in this function. Not sure
-        * what the difference between the different sizes is.
-        */
-       info->fix.smem_start = qdev->vram_base; /* TODO - correct? */
-       info->fix.smem_len = gobj->size;
-       info->screen_base = shadow;
-       info->screen_size = gobj->size;
-
-       drm_fb_helper_fill_var(info, &qdev->fb_helper, sizes->fb_width,
-                              sizes->fb_height);
-
-       /* setup aperture base/size for vesafb takeover */
-       info->apertures->ranges[0].base = qdev->ddev.mode_config.fb_base;
-       info->apertures->ranges[0].size = qdev->vram_size;
-
-       info->fix.mmio_start = 0;
-       info->fix.mmio_len = 0;
-
-       if (info->screen_base == NULL) {
-               ret = -ENOSPC;
-               goto out_unref;
-       }
-
-       /* XXX error handling. */
-       drm_fb_helper_defio_init(helper);
-
-       DRM_INFO("fb mappable at 0x%lX, size %lu\n",  info->fix.smem_start, (unsigned long)info->screen_size);
-       DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n",
-                fb->format->depth, fb->pitches[0], fb->width, fb->height);
-       return 0;
-
-out_unref:
-       if (qbo) {
-               qxl_bo_kunmap(qbo);
-               qxl_bo_unpin(qbo);
-       }
-       drm_gem_object_put_unlocked(gobj);
-       return ret;
-}
-
-static const struct drm_fb_helper_funcs qxl_fb_helper_funcs = {
-       .fb_probe = qxlfb_create,
-};
-
-int qxl_fbdev_init(struct qxl_device *qdev)
-{
-       return drm_fb_helper_fbdev_setup(&qdev->ddev, &qdev->fb_helper,
-                                        &qxl_fb_helper_funcs, 32,
-                                        QXLFB_CONN_LIMIT);
-}
-
-void qxl_fbdev_fini(struct qxl_device *qdev)
-{
-       struct fb_info *fbi = qdev->fb_helper.fbdev;
-       void *shadow = fbi ? fbi->screen_buffer : NULL;
-
-       drm_fb_helper_fbdev_teardown(&qdev->ddev);
-       vfree(shadow);
-}
index 15238a413f9d7809cd4f937a943b8ac131472564..bee61fa2c9bcb33f2d6166d1f5bf6aea22b38830 100644 (file)
@@ -26,7 +26,7 @@
 #include "qxl_drv.h"
 #include "qxl_object.h"
 
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <linux/io-mapping.h>
 
 int qxl_log_level;
@@ -53,40 +53,47 @@ static bool qxl_check_device(struct qxl_device *qdev)
        return true;
 }
 
-static void setup_hw_slot(struct qxl_device *qdev, int slot_index,
-                         struct qxl_memslot *slot)
+static void setup_hw_slot(struct qxl_device *qdev, struct qxl_memslot *slot)
 {
        qdev->ram_header->mem_slot.mem_start = slot->start_phys_addr;
-       qdev->ram_header->mem_slot.mem_end = slot->end_phys_addr;
-       qxl_io_memslot_add(qdev, slot_index);
+       qdev->ram_header->mem_slot.mem_end = slot->start_phys_addr + slot->size;
+       qxl_io_memslot_add(qdev, qdev->rom->slots_start + slot->index);
 }
 
-static uint8_t setup_slot(struct qxl_device *qdev, uint8_t slot_index_offset,
-       unsigned long start_phys_addr, unsigned long end_phys_addr)
+static void setup_slot(struct qxl_device *qdev,
+                      struct qxl_memslot *slot,
+                      unsigned int slot_index,
+                      const char *slot_name,
+                      unsigned long start_phys_addr,
+                      unsigned long size)
 {
        uint64_t high_bits;
-       struct qxl_memslot *slot;
-       uint8_t slot_index;
 
-       slot_index = qdev->rom->slots_start + slot_index_offset;
-       slot = &qdev->mem_slots[slot_index];
+       slot->index = slot_index;
+       slot->name = slot_name;
        slot->start_phys_addr = start_phys_addr;
-       slot->end_phys_addr = end_phys_addr;
+       slot->size = size;
 
-       setup_hw_slot(qdev, slot_index, slot);
+       setup_hw_slot(qdev, slot);
 
        slot->generation = qdev->rom->slot_generation;
-       high_bits = slot_index << qdev->slot_gen_bits;
+       high_bits = (qdev->rom->slots_start + slot->index)
+               << qdev->rom->slot_gen_bits;
        high_bits |= slot->generation;
-       high_bits <<= (64 - (qdev->slot_gen_bits + qdev->slot_id_bits));
+       high_bits <<= (64 - (qdev->rom->slot_gen_bits + qdev->rom->slot_id_bits));
        slot->high_bits = high_bits;
-       return slot_index;
+
+       DRM_INFO("slot %d (%s): base 0x%08lx, size 0x%08lx, gpu_offset 0x%lx\n",
+                slot->index, slot->name,
+                (unsigned long)slot->start_phys_addr,
+                (unsigned long)slot->size,
+                (unsigned long)slot->gpu_offset);
 }
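
The high_bits packing in setup_slot() above, reduced to a runnable example: the absolute slot id and the slot generation are concatenated and left-justified into the top (slot_id_bits + slot_gen_bits) bits of a 64-bit address. The bit widths are example values; the real ones come from the device ROM.

#include <assert.h>
#include <stdint.h>

int main(void)
{
        unsigned int slot_id_bits = 8, slot_gen_bits = 8;
        unsigned int slots_start = 1, index = 0, generation = 3;

        uint64_t high_bits = (uint64_t)(slots_start + index) << slot_gen_bits;
        high_bits |= generation;
        high_bits <<= 64 - (slot_gen_bits + slot_id_bits);

        /* slot id 1, generation 3 end up in the top 16 bits */
        assert(high_bits == 0x0103000000000000ULL);
        return 0;
}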
 
 void qxl_reinit_memslots(struct qxl_device *qdev)
 {
-       setup_hw_slot(qdev, qdev->main_mem_slot, &qdev->mem_slots[qdev->main_mem_slot]);
-       setup_hw_slot(qdev, qdev->surfaces_mem_slot, &qdev->mem_slots[qdev->surfaces_mem_slot]);
+       setup_hw_slot(qdev, &qdev->main_slot);
+       setup_hw_slot(qdev, &qdev->surfaces_slot);
 }
 
 static void qxl_gc_work(struct work_struct *work)
@@ -229,23 +236,6 @@ int qxl_device_init(struct qxl_device *qdev,
                r = -ENOMEM;
                goto cursor_ring_free;
        }
-       /* TODO - slot initialization should happen on reset. where is our
-        * reset handler? */
-       qdev->n_mem_slots = qdev->rom->slots_end;
-       qdev->slot_gen_bits = qdev->rom->slot_gen_bits;
-       qdev->slot_id_bits = qdev->rom->slot_id_bits;
-       qdev->va_slot_mask =
-               (~(uint64_t)0) >> (qdev->slot_id_bits + qdev->slot_gen_bits);
-
-       qdev->mem_slots =
-               kmalloc_array(qdev->n_mem_slots, sizeof(struct qxl_memslot),
-                             GFP_KERNEL);
-
-       if (!qdev->mem_slots) {
-               DRM_ERROR("Unable to alloc mem slots\n");
-               r = -ENOMEM;
-               goto release_ring_free;
-       }
 
        idr_init(&qdev->release_idr);
        spin_lock_init(&qdev->release_idr_lock);
@@ -264,33 +254,24 @@ int qxl_device_init(struct qxl_device *qdev,
        r = qxl_irq_init(qdev);
        if (r) {
                DRM_ERROR("Unable to init qxl irq\n");
-               goto mem_slots_free;
+               goto release_ring_free;
        }
 
        /*
         * Note that virtual is surface0. We rely on the single ioremap done
         * before.
         */
-       qdev->main_mem_slot = setup_slot(qdev, 0,
-               (unsigned long)qdev->vram_base,
-               (unsigned long)qdev->vram_base + qdev->rom->ram_header_offset);
-       qdev->surfaces_mem_slot = setup_slot(qdev, 1,
-               (unsigned long)qdev->surfaceram_base,
-               (unsigned long)qdev->surfaceram_base + qdev->surfaceram_size);
-       DRM_INFO("main mem slot %d [%lx,%x]\n",
-                qdev->main_mem_slot,
-                (unsigned long)qdev->vram_base, qdev->rom->ram_header_offset);
-       DRM_INFO("surface mem slot %d [%lx,%lx]\n",
-                qdev->surfaces_mem_slot,
-                (unsigned long)qdev->surfaceram_base,
-                (unsigned long)qdev->surfaceram_size);
+       setup_slot(qdev, &qdev->main_slot, 0, "main",
+                  (unsigned long)qdev->vram_base,
+                  (unsigned long)qdev->rom->ram_header_offset);
+       setup_slot(qdev, &qdev->surfaces_slot, 1, "surfaces",
+                  (unsigned long)qdev->surfaceram_base,
+                  (unsigned long)qdev->surfaceram_size);
 
        INIT_WORK(&qdev->gc_work, qxl_gc_work);
 
        return 0;
 
-mem_slots_free:
-       kfree(qdev->mem_slots);
 release_ring_free:
        qxl_ring_free(qdev->release_ring);
 cursor_ring_free:
index 91f3bbc73ecc01fa9d41ba3270b403963d024ac0..4928fa60294435dbf6e7617905c041e653287521 100644 (file)
@@ -36,6 +36,7 @@ static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
        qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
 
        qxl_surface_evict(qdev, bo, false);
+       WARN_ON_ONCE(bo->map_count > 0);
        mutex_lock(&qdev->gem.mutex);
        list_del_init(&bo->list);
        mutex_unlock(&qdev->gem.mutex);
@@ -60,8 +61,10 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
        qbo->placement.busy_placement = qbo->placements;
        if (domain == QXL_GEM_DOMAIN_VRAM)
                qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
-       if (domain == QXL_GEM_DOMAIN_SURFACE)
+       if (domain == QXL_GEM_DOMAIN_SURFACE) {
                qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV | pflag;
+               qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
+       }
        if (domain == QXL_GEM_DOMAIN_CPU)
                qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
        if (!c)
@@ -129,6 +132,7 @@ int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
        if (bo->kptr) {
                if (ptr)
                        *ptr = bo->kptr;
+               bo->map_count++;
                return 0;
        }
        r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
@@ -137,6 +141,7 @@ int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
        bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
        if (ptr)
                *ptr = bo->kptr;
+       bo->map_count = 1;
        return 0;
 }
 
@@ -178,6 +183,9 @@ void qxl_bo_kunmap(struct qxl_bo *bo)
 {
        if (bo->kptr == NULL)
                return;
+       bo->map_count--;
+       if (bo->map_count > 0)
+               return;
        bo->kptr = NULL;
        ttm_bo_kunmap(&bo->kmap);
 }
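
The new map_count makes qxl_bo_kmap()/qxl_bo_kunmap() nestable: the first map creates the kernel mapping, later callers only take a reference, and the mapping is torn down when the last user unmaps. A userspace model of that refcounting (the demo_* names are invented):

#include <assert.h>
#include <stddef.h>

struct demo_bo { void *kptr; unsigned int map_count; };

static void *demo_kmap(struct demo_bo *bo)
{
        static int backing;             /* stands in for ttm_bo_kmap() */

        if (bo->kptr) {
                bo->map_count++;        /* already mapped: take a ref */
                return bo->kptr;
        }
        bo->kptr = &backing;
        bo->map_count = 1;
        return bo->kptr;
}

static void demo_kunmap(struct demo_bo *bo)
{
        if (bo->kptr == NULL)
                return;
        if (--bo->map_count > 0)        /* inner users keep the mapping */
                return;
        bo->kptr = NULL;                /* ttm_bo_kunmap() would run here */
}

int main(void)
{
        struct demo_bo bo = { NULL, 0 };

        demo_kmap(&bo);                 /* outer user, e.g. the driver */
        demo_kmap(&bo);                 /* nested user, e.g. prime vmap */
        demo_kunmap(&bo);
        assert(bo.kptr != NULL);        /* outer user is still mapped */
        demo_kunmap(&bo);
        assert(bo.kptr == NULL);        /* last unmap tears it down */
        return 0;
}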
@@ -332,7 +340,7 @@ int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
                if (ret)
                        return ret;
 
-               ret = qxl_hw_surface_alloc(qdev, bo, NULL);
+               ret = qxl_hw_surface_alloc(qdev, bo);
                if (ret)
                        return ret;
        }
index a55dece118b292a5b6958a64b5a5c47d12d82033..22e1faf0473aa625b1d72fdbda8c12f29d28d4af 100644 (file)
  */
 
 #include "qxl_drv.h"
+#include "qxl_object.h"
 
 /* Empty Implementations as there should not be any other driver for a virtual
  * device that might share buffers with qxl */
 
 int qxl_gem_prime_pin(struct drm_gem_object *obj)
 {
-       WARN_ONCE(1, "not implemented");
-       return -ENOSYS;
+       struct qxl_bo *bo = gem_to_qxl_bo(obj);
+
+       return qxl_bo_pin(bo);
 }
 
 void qxl_gem_prime_unpin(struct drm_gem_object *obj)
 {
-       WARN_ONCE(1, "not implemented");
+       struct qxl_bo *bo = gem_to_qxl_bo(obj);
+
+       qxl_bo_unpin(bo);
 }
 
 struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj)
@@ -54,13 +58,22 @@ struct drm_gem_object *qxl_gem_prime_import_sg_table(
 
 void *qxl_gem_prime_vmap(struct drm_gem_object *obj)
 {
-       WARN_ONCE(1, "not implemented");
-       return ERR_PTR(-ENOSYS);
+       struct qxl_bo *bo = gem_to_qxl_bo(obj);
+       void *ptr;
+       int ret;
+
+       ret = qxl_bo_kmap(bo, &ptr);
+       if (ret < 0)
+               return ERR_PTR(ret);
+
+       return ptr;
 }
 
 void qxl_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
 {
-       WARN_ONCE(1, "not implemented");
+       struct qxl_bo *bo = gem_to_qxl_bo(obj);
+
+       qxl_bo_kunmap(bo);
 }
 
 int qxl_gem_prime_mmap(struct drm_gem_object *obj,
index 886f61e94f24470c37feaf224974fb28d43d2e5b..92f5db5b296f5275e22ea14f4ec700e894193ee3 100644 (file)
@@ -100,6 +100,11 @@ static int qxl_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
 static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                             struct ttm_mem_type_manager *man)
 {
+       struct qxl_device *qdev = qxl_get_qdev(bdev);
+       unsigned int gpu_offset_shift =
+               64 - (qdev->rom->slot_gen_bits + qdev->rom->slot_id_bits + 8);
+       struct qxl_memslot *slot;
+
        switch (type) {
        case TTM_PL_SYSTEM:
                /* System memory */
@@ -110,8 +115,11 @@ static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
        case TTM_PL_VRAM:
        case TTM_PL_PRIV:
                /* "On-card" video ram */
+               slot = (type == TTM_PL_VRAM) ?
+                       &qdev->main_slot : &qdev->surfaces_slot;
+               slot->gpu_offset = (uint64_t)type << gpu_offset_shift;
                man->func = &ttm_bo_manager_func;
-               man->gpu_offset = 0;
+               man->gpu_offset = slot->gpu_offset;
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
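
The gpu_offset computed above gives each TTM memory type a distinct, non-overlapping offset window just below the slot id/generation bits, which qxl_bo_physical_address() then subtracts again. A sketch of the windowing, assuming the usual TTM_PL_VRAM = 2 / TTM_PL_PRIV = 3 values and the same "+8" headroom as in the hunk:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned int slot_gen_bits = 8, slot_id_bits = 8;
        unsigned int shift = 64 - (slot_gen_bits + slot_id_bits + 8);

        uint64_t vram_base = (uint64_t)2 << shift; /* TTM_PL_VRAM window */
        uint64_t priv_base = (uint64_t)3 << shift; /* TTM_PL_PRIV window */

        printf("VRAM offsets start at 0x%016llx\n", (unsigned long long)vram_base);
        printf("PRIV offsets start at 0x%016llx\n", (unsigned long long)priv_base);
        return 0;
}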
@@ -196,7 +204,7 @@ static void qxl_ttm_io_mem_free(struct ttm_bo_device *bdev,
  * TTM backend functions.
  */
 struct qxl_ttm_tt {
-       struct ttm_dma_tt               ttm;
+       struct ttm_tt                   ttm;
        struct qxl_device               *qdev;
        u64                             offset;
 };
@@ -225,7 +233,7 @@ static void qxl_ttm_backend_destroy(struct ttm_tt *ttm)
 {
        struct qxl_ttm_tt *gtt = (void *)ttm;
 
-       ttm_dma_tt_fini(&gtt->ttm);
+       ttm_tt_fini(&gtt->ttm);
        kfree(gtt);
 }
 
@@ -245,13 +253,13 @@ static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo,
        gtt = kzalloc(sizeof(struct qxl_ttm_tt), GFP_KERNEL);
        if (gtt == NULL)
                return NULL;
-       gtt->ttm.ttm.func = &qxl_backend_func;
+       gtt->ttm.func = &qxl_backend_func;
        gtt->qdev = qdev;
-       if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
+       if (ttm_tt_init(&gtt->ttm, bo, page_flags)) {
                kfree(gtt);
                return NULL;
        }
-       return &gtt->ttm.ttm;
+       return &gtt->ttm;
 }
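
Dropping ttm_dma_tt is safe here in part because the (void *)ttm cast in qxl_ttm_backend_destroy() only relies on the ttm member sitting at offset zero of struct qxl_ttm_tt, which still holds with the plain ttm_tt embedding; a paravirtual device with no real DMA has no use for the dma-address bookkeeping. A small model of that first-member cast (struct names invented):

#include <assert.h>
#include <stddef.h>

struct base_tt { int dummy; };          /* stands in for struct ttm_tt */
struct demo_qxl_tt { struct base_tt ttm; unsigned long offset; };

int main(void)
{
        struct demo_qxl_tt gtt;
        struct base_tt *ttm = &gtt.ttm;

        assert(offsetof(struct demo_qxl_tt, ttm) == 0);
        assert((void *)ttm == (void *)&gtt); /* the (void *)ttm cast is safe */
        return 0;
}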
 
 static void qxl_move_null(struct ttm_buffer_object *bo,
index 0d2b7e42b3a7d876c6879c24017d3e5cc2c08cc6..4b1a505ab3535c90223d47f25efdd170cea2b83e 100644 (file)
@@ -57,7 +57,7 @@ static const struct file_operations r128_driver_fops = {
 static struct drm_driver driver = {
        .driver_features =
            DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG | DRIVER_LEGACY |
-           DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
+           DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ,
        .dev_priv_size = sizeof(drm_r128_buf_priv_t),
        .load = r128_driver_load,
        .preclose = r128_driver_preclose,
index e55cbeee7a5376bb8008042dd53a160bc6e06f95..ac98ad5618700d54564cfb51cba5d25f4b8b9464 100644 (file)
@@ -27,6 +27,8 @@
 #include <linux/slab.h>
 #include <asm/unaligned.h>
 
+#include <drm/drm_util.h>
+
 #define ATOM_DEBUG
 
 #include "atom.h"
index 8d3251a10cd49edd6ddbbd2ea15ba0faca219ad3..224cc21bbe3815283b4e27094392de56476e8966 100644 (file)
@@ -29,6 +29,7 @@
 #include <acpi/video.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include "radeon.h"
 #include "radeon_acpi.h"
 #include "atom.h"
index 770e31f5fd1b0c720762aeb8b9eee949c5c01aa0..96f71114237a869e7fa7d92068f3916712511ee2 100644 (file)
@@ -516,21 +516,17 @@ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
        if (!connector)
                return -EINVAL;
 
-       err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
+       err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
        if (err < 0) {
                DRM_ERROR("failed to setup AVI infoframe: %d\n", err);
                return err;
        }
 
        if (radeon_encoder->output_csc != RADEON_OUTPUT_CSC_BYPASS) {
-               if (drm_rgb_quant_range_selectable(radeon_connector_edid(connector))) {
-                       if (radeon_encoder->output_csc == RADEON_OUTPUT_CSC_TVRGB)
-                               frame.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED;
-                       else
-                               frame.quantization_range = HDMI_QUANTIZATION_RANGE_FULL;
-               } else {
-                       frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
-               }
+               drm_hdmi_avi_infoframe_quant_range(&frame, connector, mode,
+                                                  radeon_encoder->output_csc == RADEON_OUTPUT_CSC_TVRGB ?
+                                                  HDMI_QUANTIZATION_RANGE_LIMITED :
+                                                  HDMI_QUANTIZATION_RANGE_FULL);
        }
 
        err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
index 414642e5b7a3110353bafb3ad022f0d7fee4e78f..de1745adccccb89aaeab237951676ec3d7206de3 100644 (file)
@@ -28,6 +28,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_dp_mst_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/radeon_drm.h>
 #include "radeon.h"
 #include "radeon_audio.h"
index 59c8a6647ff210346b4e4bd7916b2c5909ba6b22..53f29a115104d8001a8f6ba5359a167e8ee564c8 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/slab.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drm_cache.h>
 #include <drm/radeon_drm.h>
 #include <linux/pm_runtime.h>
index 9d3ac8b981dab3d324e2864495c5fe739f4d675f..aa898c699101c90e3274cc4c193f52c3847bbf84 100644 (file)
@@ -35,6 +35,7 @@
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drm_edid.h>
 
 #include <linux/gcd.h>
@@ -1646,7 +1647,7 @@ void radeon_modeset_fini(struct radeon_device *rdev)
        if (rdev->mode_info.mode_config_initialized) {
                drm_kms_helper_poll_fini(rdev->ddev);
                radeon_hpd_fini(rdev);
-               drm_crtc_force_disable_all(rdev->ddev);
+               drm_helper_force_disable_all(rdev->ddev);
                radeon_fbdev_fini(rdev);
                radeon_afmt_fini(rdev);
                drm_mode_config_cleanup(rdev->ddev);
index a0c70e27ab65a48ad78fd9e48ef636c1e12e03cc..8d85540bbb43677de31d8e06f3cb50afc66b6656 100644 (file)
@@ -3,6 +3,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_dp_mst_helper.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "radeon.h"
 #include "atom.h"
index 99c63eeb2866ab0834bbfc8899552994a717ca05..2e96c886392bd1b7b2aeaafa74ef5b2750a36c20 100644 (file)
@@ -43,6 +43,7 @@
 #include <drm/drm_fb_helper.h>
 
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 
 /*
  * KMS wrapper.
@@ -533,9 +534,7 @@ radeon_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
 
 static struct drm_driver kms_driver = {
        .driver_features =
-           DRIVER_USE_AGP |
-           DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
-           DRIVER_PRIME | DRIVER_RENDER,
+           DRIVER_USE_AGP | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER,
        .load = radeon_driver_load_kms,
        .open = radeon_driver_open_kms,
        .postclose = radeon_driver_postclose_kms,
index afaf10db47ccbc6a4325020a01853dadcce7b11f..1d5e3ba7383e29f62c58aaef3426ca613fd8aba0 100644 (file)
@@ -27,6 +27,7 @@
  */
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/radeon_drm.h>
 #include "radeon_reg.h"
 #include "radeon.h"
index 222a1fa41d7c96c2e1853fcddfd3f6d64f56a196..7e3257e8fd5604f72366aa8febc8e0de1198d920 100644 (file)
@@ -24,6 +24,7 @@
  *          Alex Deucher
  */
 #include <drm/drmP.h>
+#include <drm/drm_util.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/radeon_drm.h>
 #include "radeon.h"
index 225141656e1903758e096561521629e7154bf78c..7c36e2777a154e98c233a2eb153e4be4c9811299 100644 (file)
@@ -4,6 +4,7 @@ config DRM_RCAR_DU
        depends on DRM && OF
        depends on ARM || ARM64
        depends on ARCH_RENESAS || COMPILE_TEST
+       imply DRM_RCAR_LVDS
        select DRM_KMS_HELPER
        select DRM_KMS_CMA_HELPER
        select DRM_GEM_CMA_HELPER
index 90dacab67be5a8629525ed30665d58b93eeb2878..96175d48a9022494105cfd534b8d6f4c03631aff 100644 (file)
 #include <linux/mutex.h>
 #include <linux/sys_soc.h>
 
-#include <drm/drmP.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_plane_helper.h>
 
 #include "rcar_du_crtc.h"
 #include "rcar_du_drv.h"
+#include "rcar_du_encoder.h"
 #include "rcar_du_kms.h"
 #include "rcar_du_plane.h"
 #include "rcar_du_regs.h"
 #include "rcar_du_vsp.h"
+#include "rcar_lvds.h"
 
 static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg)
 {
@@ -316,26 +316,6 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
        rcar_du_crtc_write(rcrtc, DEWR,  mode->hdisplay);
 }
 
-void rcar_du_crtc_route_output(struct drm_crtc *crtc,
-                              enum rcar_du_output output)
-{
-       struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
-       struct rcar_du_device *rcdu = rcrtc->group->dev;
-
-       /*
-        * Store the route from the CRTC output to the DU output. The DU will be
-        * configured when starting the CRTC.
-        */
-       rcrtc->outputs |= BIT(output);
-
-       /*
-        * Store RGB routing to DPAD0, the hardware will be configured when
-        * starting the CRTC.
-        */
-       if (output == RCAR_DU_OUTPUT_DPAD0)
-               rcdu->dpad0_source = rcrtc->index;
-}
-
 static unsigned int plane_zpos(struct rcar_du_plane *plane)
 {
        return plane->plane.state->normalized_zpos;
@@ -655,12 +635,49 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
  * CRTC Functions
  */
 
+static int rcar_du_crtc_atomic_check(struct drm_crtc *crtc,
+                                    struct drm_crtc_state *state)
+{
+       struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(state);
+       struct drm_encoder *encoder;
+
+       /* Store the routes from the CRTC output to the DU outputs. */
+       rstate->outputs = 0;
+
+       drm_for_each_encoder_mask(encoder, crtc->dev, state->encoder_mask) {
+               struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
+
+               rstate->outputs |= BIT(renc->output);
+       }
+
+       return 0;
+}
+
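
rcar_du_crtc_atomic_check() now derives the routing bitmask purely from the atomic state's encoder_mask, replacing the removed rcar_du_crtc_route_output() callback. The accumulation, modeled with plain bitmasks (the output numbering is illustrative):

#include <assert.h>

enum { OUT_DPAD0, OUT_LVDS0, OUT_LVDS1 };

int main(void)
{
        unsigned int encoder_outputs[] = { OUT_DPAD0, OUT_LVDS0 };
        unsigned int outputs = 0;
        unsigned int i;

        /* stands in for drm_for_each_encoder_mask() over state->encoder_mask */
        for (i = 0; i < 2; i++)
                outputs |= 1u << encoder_outputs[i];

        assert(outputs == ((1u << OUT_DPAD0) | (1u << OUT_LVDS0)));
        return 0;
}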
 static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
                                       struct drm_crtc_state *old_state)
 {
        struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+       struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(crtc->state);
+       struct rcar_du_device *rcdu = rcrtc->group->dev;
 
        rcar_du_crtc_get(rcrtc);
+
+       /*
+        * On D3/E3 the dot clock is provided by the LVDS encoder attached to
+        * the DU channel. We need to enable its clock output explicitly if
+        * the LVDS output is disabled.
+        */
+       if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) &&
+           rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0)) {
+               struct rcar_du_encoder *encoder =
+                       rcdu->encoders[RCAR_DU_OUTPUT_LVDS0 + rcrtc->index];
+               const struct drm_display_mode *mode =
+                       &crtc->state->adjusted_mode;
+
+               rcar_lvds_clk_enable(encoder->base.bridge,
+                                    mode->clock * 1000);
+       }
+
        rcar_du_crtc_start(rcrtc);
 }
 
@@ -668,18 +685,30 @@ static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
                                        struct drm_crtc_state *old_state)
 {
        struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+       struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(old_state);
+       struct rcar_du_device *rcdu = rcrtc->group->dev;
 
        rcar_du_crtc_stop(rcrtc);
        rcar_du_crtc_put(rcrtc);
 
+       if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) &&
+           rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0)) {
+               struct rcar_du_encoder *encoder =
+                       rcdu->encoders[RCAR_DU_OUTPUT_LVDS0 + rcrtc->index];
+
+               /*
+                * Disable the LVDS clock output, see
+                * rcar_du_crtc_atomic_enable().
+                */
+               rcar_lvds_clk_disable(encoder->base.bridge);
+       }
+
        spin_lock_irq(&crtc->dev->event_lock);
        if (crtc->state->event) {
                drm_crtc_send_vblank_event(crtc, crtc->state->event);
                crtc->state->event = NULL;
        }
        spin_unlock_irq(&crtc->dev->event_lock);
-
-       rcrtc->outputs = 0;
 }
 
 static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
@@ -755,6 +784,7 @@ enum drm_mode_status rcar_du_crtc_mode_valid(struct drm_crtc *crtc,
 }
 
 static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
+       .atomic_check = rcar_du_crtc_atomic_check,
        .atomic_begin = rcar_du_crtc_atomic_begin,
        .atomic_flush = rcar_du_crtc_atomic_flush,
        .atomic_enable = rcar_du_crtc_atomic_enable,
index 59ac6e7d22c932777d1ca96dcee8f537d9d24ea1..bcb35b0b761202008af2a0df010154e2d747c316 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/spinlock.h>
 #include <linux/wait.h>
 
-#include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 
 #include <media/vsp1.h>
@@ -37,7 +36,6 @@ struct rcar_du_vsp;
  * @vblank_lock: protects vblank_wait and vblank_count
  * @vblank_wait: wait queue used to signal vertical blanking
  * @vblank_count: number of vertical blanking interrupts to wait for
- * @outputs: bitmask of the outputs (enum rcar_du_output) driven by this CRTC
  * @group: CRTC group this CRTC belongs to
  * @vsp: VSP feeding video to this CRTC
  * @vsp_pipe: index of the VSP pipeline feeding video to this CRTC
@@ -61,8 +59,6 @@ struct rcar_du_crtc {
        wait_queue_head_t vblank_wait;
        unsigned int vblank_count;
 
-       unsigned int outputs;
-
        struct rcar_du_group *group;
        struct rcar_du_vsp *vsp;
        unsigned int vsp_pipe;
@@ -77,11 +73,13 @@ struct rcar_du_crtc {
  * struct rcar_du_crtc_state - Driver-specific CRTC state
  * @state: base DRM CRTC state
  * @crc: CRC computation configuration
+ * @outputs: bitmask of the outputs (enum rcar_du_output) driven by this CRTC
  */
 struct rcar_du_crtc_state {
        struct drm_crtc_state state;
 
        struct vsp1_du_crc_config crc;
+       unsigned int outputs;
 };
 
 #define to_rcar_crtc_state(s) container_of(s, struct rcar_du_crtc_state, state)
@@ -102,8 +100,6 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
 void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc);
 void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc);
 
-void rcar_du_crtc_route_output(struct drm_crtc *crtc,
-                              enum rcar_du_output output);
 void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc);
 
 void rcar_du_crtc_dsysr_clr_set(struct rcar_du_crtc *rcrtc, u32 clr, u32 set);
index f50a3b1864bbe199c97d9df4a6ac018b12da7e67..abd70d2931b01c50048c1f68abab7e8063bc2896 100644 (file)
 #include <linux/slab.h>
 #include <linux/wait.h>
 
-#include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "rcar_du_drv.h"
 #include "rcar_du_kms.h"
@@ -36,7 +35,6 @@
 static const struct rcar_du_device_info rzg1_du_r8a7743_info = {
        .gen = 2,
        .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
-                 | RCAR_DU_FEATURE_EXT_CTRL_REGS
                  | RCAR_DU_FEATURE_INTERLACED
                  | RCAR_DU_FEATURE_TVM_SYNC,
        .channels_mask = BIT(1) | BIT(0),
@@ -59,7 +57,6 @@ static const struct rcar_du_device_info rzg1_du_r8a7743_info = {
 static const struct rcar_du_device_info rzg1_du_r8a7745_info = {
        .gen = 2,
        .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
-                 | RCAR_DU_FEATURE_EXT_CTRL_REGS
                  | RCAR_DU_FEATURE_INTERLACED
                  | RCAR_DU_FEATURE_TVM_SYNC,
        .channels_mask = BIT(1) | BIT(0),
@@ -81,7 +78,6 @@ static const struct rcar_du_device_info rzg1_du_r8a7745_info = {
 static const struct rcar_du_device_info rzg1_du_r8a77470_info = {
        .gen = 2,
        .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
-                 | RCAR_DU_FEATURE_EXT_CTRL_REGS
                  | RCAR_DU_FEATURE_INTERLACED
                  | RCAR_DU_FEATURE_TVM_SYNC,
        .channels_mask = BIT(1) | BIT(0),
@@ -105,8 +101,34 @@ static const struct rcar_du_device_info rzg1_du_r8a77470_info = {
        },
 };
 
+static const struct rcar_du_device_info rcar_du_r8a774c0_info = {
+       .gen = 3,
+       .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+                 | RCAR_DU_FEATURE_VSP1_SOURCE,
+       .channels_mask = BIT(1) | BIT(0),
+       .routes = {
+               /*
+                * R8A774C0 has one RGB output and two LVDS outputs
+                */
+               [RCAR_DU_OUTPUT_DPAD0] = {
+                       .possible_crtcs = BIT(0) | BIT(1),
+                       .port = 0,
+               },
+               [RCAR_DU_OUTPUT_LVDS0] = {
+                       .possible_crtcs = BIT(0),
+                       .port = 1,
+               },
+               [RCAR_DU_OUTPUT_LVDS1] = {
+                       .possible_crtcs = BIT(1),
+                       .port = 2,
+               },
+       },
+       .num_lvds = 2,
+       .lvds_clk_mask =  BIT(1) | BIT(0),
+};
+
 static const struct rcar_du_device_info rcar_du_r8a7779_info = {
-       .gen = 2,
+       .gen = 1,
        .features = RCAR_DU_FEATURE_INTERLACED
                  | RCAR_DU_FEATURE_TVM_SYNC,
        .channels_mask = BIT(1) | BIT(0),
@@ -129,7 +151,6 @@ static const struct rcar_du_device_info rcar_du_r8a7779_info = {
 static const struct rcar_du_device_info rcar_du_r8a7790_info = {
        .gen = 2,
        .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
-                 | RCAR_DU_FEATURE_EXT_CTRL_REGS
                  | RCAR_DU_FEATURE_INTERLACED
                  | RCAR_DU_FEATURE_TVM_SYNC,
        .quirks = RCAR_DU_QUIRK_ALIGN_128B,
@@ -159,7 +180,6 @@ static const struct rcar_du_device_info rcar_du_r8a7790_info = {
 static const struct rcar_du_device_info rcar_du_r8a7791_info = {
        .gen = 2,
        .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
-                 | RCAR_DU_FEATURE_EXT_CTRL_REGS
                  | RCAR_DU_FEATURE_INTERLACED
                  | RCAR_DU_FEATURE_TVM_SYNC,
        .channels_mask = BIT(1) | BIT(0),
@@ -183,7 +203,6 @@ static const struct rcar_du_device_info rcar_du_r8a7791_info = {
 static const struct rcar_du_device_info rcar_du_r8a7792_info = {
        .gen = 2,
        .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
-                 | RCAR_DU_FEATURE_EXT_CTRL_REGS
                  | RCAR_DU_FEATURE_INTERLACED
                  | RCAR_DU_FEATURE_TVM_SYNC,
        .channels_mask = BIT(1) | BIT(0),
@@ -203,7 +222,6 @@ static const struct rcar_du_device_info rcar_du_r8a7792_info = {
 static const struct rcar_du_device_info rcar_du_r8a7794_info = {
        .gen = 2,
        .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
-                 | RCAR_DU_FEATURE_EXT_CTRL_REGS
                  | RCAR_DU_FEATURE_INTERLACED
                  | RCAR_DU_FEATURE_TVM_SYNC,
        .channels_mask = BIT(1) | BIT(0),
@@ -226,7 +244,6 @@ static const struct rcar_du_device_info rcar_du_r8a7794_info = {
 static const struct rcar_du_device_info rcar_du_r8a7795_info = {
        .gen = 3,
        .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
-                 | RCAR_DU_FEATURE_EXT_CTRL_REGS
                  | RCAR_DU_FEATURE_VSP1_SOURCE
                  | RCAR_DU_FEATURE_INTERLACED
                  | RCAR_DU_FEATURE_TVM_SYNC,
@@ -260,7 +277,6 @@ static const struct rcar_du_device_info rcar_du_r8a7795_info = {
 static const struct rcar_du_device_info rcar_du_r8a7796_info = {
        .gen = 3,
        .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
-                 | RCAR_DU_FEATURE_EXT_CTRL_REGS
                  | RCAR_DU_FEATURE_VSP1_SOURCE
                  | RCAR_DU_FEATURE_INTERLACED
                  | RCAR_DU_FEATURE_TVM_SYNC,
@@ -290,7 +306,6 @@ static const struct rcar_du_device_info rcar_du_r8a7796_info = {
 static const struct rcar_du_device_info rcar_du_r8a77965_info = {
        .gen = 3,
        .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
-                 | RCAR_DU_FEATURE_EXT_CTRL_REGS
                  | RCAR_DU_FEATURE_VSP1_SOURCE
                  | RCAR_DU_FEATURE_INTERLACED
                  | RCAR_DU_FEATURE_TVM_SYNC,
@@ -320,7 +335,6 @@ static const struct rcar_du_device_info rcar_du_r8a77965_info = {
 static const struct rcar_du_device_info rcar_du_r8a77970_info = {
        .gen = 3,
        .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
-                 | RCAR_DU_FEATURE_EXT_CTRL_REGS
                  | RCAR_DU_FEATURE_VSP1_SOURCE
                  | RCAR_DU_FEATURE_INTERLACED
                  | RCAR_DU_FEATURE_TVM_SYNC,
@@ -342,7 +356,6 @@ static const struct rcar_du_device_info rcar_du_r8a77970_info = {
 static const struct rcar_du_device_info rcar_du_r8a7799x_info = {
        .gen = 3,
        .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
-                 | RCAR_DU_FEATURE_EXT_CTRL_REGS
                  | RCAR_DU_FEATURE_VSP1_SOURCE,
        .channels_mask = BIT(1) | BIT(0),
        .routes = {
@@ -372,6 +385,7 @@ static const struct of_device_id rcar_du_of_table[] = {
        { .compatible = "renesas,du-r8a7744", .data = &rzg1_du_r8a7743_info },
        { .compatible = "renesas,du-r8a7745", .data = &rzg1_du_r8a7745_info },
        { .compatible = "renesas,du-r8a77470", .data = &rzg1_du_r8a77470_info },
+       { .compatible = "renesas,du-r8a774c0", .data = &rcar_du_r8a774c0_info },
        { .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info },
        { .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info },
        { .compatible = "renesas,du-r8a7791", .data = &rcar_du_r8a7791_info },
index a68da79b424ee50308ad18a7447d03f6fba300c2..1327cd0df90a4b2f9fcf9632fc379aceba9f2e5d 100644 (file)
 struct clk;
 struct device;
 struct drm_device;
+struct drm_property;
 struct rcar_du_device;
+struct rcar_du_encoder;
 
 #define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK BIT(0)  /* Per-CRTC IRQ and clock */
-#define RCAR_DU_FEATURE_EXT_CTRL_REGS  BIT(1)  /* Has extended control registers */
-#define RCAR_DU_FEATURE_VSP1_SOURCE    BIT(2)  /* Has inputs from VSP1 */
-#define RCAR_DU_FEATURE_INTERLACED     BIT(3)  /* HW supports interlaced */
-#define RCAR_DU_FEATURE_TVM_SYNC       BIT(4)  /* Has TV switch/sync modes */
+#define RCAR_DU_FEATURE_VSP1_SOURCE    BIT(1)  /* Has inputs from VSP1 */
+#define RCAR_DU_FEATURE_INTERLACED     BIT(2)  /* HW supports interlaced */
+#define RCAR_DU_FEATURE_TVM_SYNC       BIT(3)  /* Has TV switch/sync modes */
 
 #define RCAR_DU_QUIRK_ALIGN_128B       BIT(0)  /* Align pitches to 128 bytes */
 
@@ -81,6 +82,8 @@ struct rcar_du_device {
        struct rcar_du_crtc crtcs[RCAR_DU_MAX_CRTCS];
        unsigned int num_crtcs;
 
+       struct rcar_du_encoder *encoders[RCAR_DU_OUTPUT_MAX];
+
        struct rcar_du_group groups[RCAR_DU_MAX_GROUPS];
        struct rcar_du_vsp vsps[RCAR_DU_MAX_VSPS];
 
@@ -89,6 +92,7 @@ struct rcar_du_device {
        } props;
 
        unsigned int dpad0_source;
+       unsigned int dpad1_source;
        unsigned int vspd1_sink;
 };
 
index 1877764bd6d93fa566f3122bc55b845e07de28c0..8ee4e762f4e5599715973b61f945703ace6ae723 100644 (file)
@@ -9,9 +9,8 @@
 
 #include <linux/export.h>
 
-#include <drm/drmP.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
 #include <drm/drm_panel.h>
 
 #include "rcar_du_drv.h"
  * Encoder
  */
 
-static void rcar_du_encoder_mode_set(struct drm_encoder *encoder,
-                                    struct drm_crtc_state *crtc_state,
-                                    struct drm_connector_state *conn_state)
-{
-       struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
-
-       rcar_du_crtc_route_output(crtc_state->crtc, renc->output);
-}
-
 static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
-       .atomic_mode_set = rcar_du_encoder_mode_set,
 };
 
 static const struct drm_encoder_funcs encoder_funcs = {
@@ -41,8 +30,7 @@ static const struct drm_encoder_funcs encoder_funcs = {
 
 int rcar_du_encoder_init(struct rcar_du_device *rcdu,
                         enum rcar_du_output output,
-                        struct device_node *enc_node,
-                        struct device_node *con_node)
+                        struct device_node *enc_node)
 {
        struct rcar_du_encoder *renc;
        struct drm_encoder *encoder;
@@ -53,6 +41,7 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
        if (renc == NULL)
                return -ENOMEM;
 
+       rcdu->encoders[output] = renc;
        renc->output = output;
        encoder = rcar_encoder_to_drm_encoder(renc);
 
index ce3cbc85695eb985b1f0fecdf83082a329cab5d8..df9be4524301942bafe388da0eed0d275fcc9a4b 100644 (file)
 #ifndef __RCAR_DU_ENCODER_H__
 #define __RCAR_DU_ENCODER_H__
 
-#include <drm/drm_crtc.h>
 #include <drm/drm_encoder.h>
 
-struct drm_panel;
 struct rcar_du_device;
 
 struct rcar_du_encoder {
@@ -28,7 +26,6 @@ struct rcar_du_encoder {
 
 int rcar_du_encoder_init(struct rcar_du_device *rcdu,
                         enum rcar_du_output output,
-                        struct device_node *enc_node,
-                        struct device_node *con_node);
+                        struct device_node *enc_node);
 
 #endif /* __RCAR_DU_ENCODER_H__ */
index cebf313c6e1f9290dd8817099fb6388d25649032..9eee47969e77818805ba5b9baf595eab05f9dc67 100644 (file)
@@ -147,7 +147,7 @@ static void rcar_du_group_setup(struct rcar_du_group *rgrp)
 
        rcar_du_group_setup_pins(rgrp);
 
-       if (rcar_du_has(rgrp->dev, RCAR_DU_FEATURE_EXT_CTRL_REGS)) {
+       if (rcdu->info->gen >= 2) {
                rcar_du_group_setup_defr8(rgrp);
                rcar_du_group_setup_didsr(rgrp);
        }
@@ -262,7 +262,7 @@ int rcar_du_set_dpad0_vsp1_routing(struct rcar_du_device *rcdu)
        unsigned int index;
        int ret;
 
-       if (!rcar_du_has(rcdu, RCAR_DU_FEATURE_EXT_CTRL_REGS))
+       if (rcdu->info->gen < 2)
                return 0;
 
        /*
@@ -287,9 +287,50 @@ int rcar_du_set_dpad0_vsp1_routing(struct rcar_du_device *rcdu)
        return 0;
 }
 
+static void rcar_du_group_set_dpad_levels(struct rcar_du_group *rgrp)
+{
+       static const u32 doflr_values[2] = {
+               DOFLR_HSYCFL0 | DOFLR_VSYCFL0 | DOFLR_ODDFL0 |
+               DOFLR_DISPFL0 | DOFLR_CDEFL0  | DOFLR_RGBFL0,
+               DOFLR_HSYCFL1 | DOFLR_VSYCFL1 | DOFLR_ODDFL1 |
+               DOFLR_DISPFL1 | DOFLR_CDEFL1  | DOFLR_RGBFL1,
+       };
+       static const u32 dpad_mask = BIT(RCAR_DU_OUTPUT_DPAD1)
+                                  | BIT(RCAR_DU_OUTPUT_DPAD0);
+       struct rcar_du_device *rcdu = rgrp->dev;
+       u32 doflr = DOFLR_CODE;
+       unsigned int i;
+
+       if (rcdu->info->gen < 2)
+               return;
+
+       /*
+        * The DPAD outputs can't be controlled directly. However, the parallel
+        * output of the DU channels routed to DPAD can be set to fixed levels
+        * through the DOFLR group register. Use this to turn the DPAD on or off
+        * by driving fixed low-level signals at the output of any DU channel
+        * not routed to a DPAD output. This doesn't affect the DU output
+        * signals going to other outputs, such as the internal LVDS and HDMI
+        * encoders.
+        */
+
+       for (i = 0; i < rgrp->num_crtcs; ++i) {
+               struct rcar_du_crtc_state *rstate;
+               struct rcar_du_crtc *rcrtc;
+
+               rcrtc = &rcdu->crtcs[rgrp->index * 2 + i];
+               rstate = to_rcar_crtc_state(rcrtc->crtc.state);
+
+               if (!(rstate->outputs & dpad_mask))
+                       doflr |= doflr_values[i];
+       }
+
+       rcar_du_group_write(rgrp, DOFLR, doflr);
+}
+
 int rcar_du_group_set_routing(struct rcar_du_group *rgrp)
 {
-       struct rcar_du_crtc *crtc0 = &rgrp->dev->crtcs[rgrp->index * 2];
+       struct rcar_du_device *rcdu = rgrp->dev;
        u32 dorcr = rcar_du_group_read(rgrp, DORCR);
 
        dorcr &= ~(DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_MASK);
@@ -299,12 +340,14 @@ int rcar_du_group_set_routing(struct rcar_du_group *rgrp)
         * CRTC 1 in all other cases to avoid cloning CRTC 0 to DPAD0 and DPAD1
         * by default.
         */
-       if (crtc0->outputs & BIT(RCAR_DU_OUTPUT_DPAD1))
+       if (rcdu->dpad1_source == rgrp->index * 2)
                dorcr |= DORCR_PG2D_DS1;
        else
                dorcr |= DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_DS2;
 
        rcar_du_group_write(rgrp, DORCR, dorcr);
 
+       rcar_du_group_set_dpad_levels(rgrp);
+
        return rcar_du_set_dpad0_vsp1_routing(rgrp->dev);
 }
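
To make the fixed-level logic in rcar_du_group_set_dpad_levels() above easier to follow, here is a standalone sketch of the same decision; the DOFLR_* values and the output numbering are simplified stand-ins, not the real register definitions:

/* Standalone sketch: force fixed low levels on any channel whose
 * parallel output is not routed to a DPAD. Values are illustrative. */
#include <stdio.h>
#include <stdint.h>

#define BIT(n)          (1u << (n))
#define DOFLR_CODE      0x7790u       /* stand-in for the magic code */
#define DOFLR_FIXED_CH0 0x0000003fu   /* stands for the *FL0 bits */
#define DOFLR_FIXED_CH1 0x003f0000u   /* stands for the *FL1 bits */
#define OUTPUT_DPAD0    0
#define OUTPUT_DPAD1    1
#define OUTPUT_LVDS0    2

int main(void)
{
	/* Outputs driven by the group's two channels. */
	uint32_t outputs[2] = { BIT(OUTPUT_DPAD0), BIT(OUTPUT_LVDS0) };
	uint32_t dpad_mask = BIT(OUTPUT_DPAD0) | BIT(OUTPUT_DPAD1);
	uint32_t doflr = DOFLR_CODE;

	for (int i = 0; i < 2; i++)
		if (!(outputs[i] & dpad_mask))
			doflr |= i ? DOFLR_FIXED_CH1 : DOFLR_FIXED_CH0;

	/* Only channel 1 (LVDS-bound) gets forced low: 0x003f7790. */
	printf("DOFLR = 0x%08x\n", doflr);
	return 0;
}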
index 9c7007d45408b921ce7a909ee1f4d661ca55db67..b0c80dffd8b8ee96af2c4680093e1d0fa8bb4cd0 100644 (file)
@@ -7,14 +7,13 @@
  * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
  */
 
-#include <drm/drmP.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include <linux/of_graph.h>
 #include <linux/wait.h>
@@ -278,6 +277,28 @@ static int rcar_du_atomic_check(struct drm_device *dev,
 static void rcar_du_atomic_commit_tail(struct drm_atomic_state *old_state)
 {
        struct drm_device *dev = old_state->dev;
+       struct rcar_du_device *rcdu = dev->dev_private;
+       struct drm_crtc_state *crtc_state;
+       struct drm_crtc *crtc;
+       unsigned int i;
+
+       /*
+        * Store RGB routing to DPAD0 and DPAD1; the hardware will be configured
+        * when starting the CRTCs.
+        */
+       rcdu->dpad1_source = -1;
+
+       for_each_new_crtc_in_state(old_state, crtc, crtc_state, i) {
+               struct rcar_du_crtc_state *rcrtc_state =
+                       to_rcar_crtc_state(crtc_state);
+               struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+
+               if (rcrtc_state->outputs & BIT(RCAR_DU_OUTPUT_DPAD0))
+                       rcdu->dpad0_source = rcrtc->index;
+
+               if (rcrtc_state->outputs & BIT(RCAR_DU_OUTPUT_DPAD1))
+                       rcdu->dpad1_source = rcrtc->index;
+       }
 
        /* Apply the atomic update. */
        drm_atomic_helper_commit_modeset_disables(dev, old_state);
@@ -309,17 +330,10 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
                                     enum rcar_du_output output,
                                     struct of_endpoint *ep)
 {
-       struct device_node *connector = NULL;
-       struct device_node *encoder = NULL;
-       struct device_node *ep_node = NULL;
-       struct device_node *entity_ep_node;
        struct device_node *entity;
        int ret;
 
-       /*
-        * Locate the connected entity and infer its type from the number of
-        * endpoints.
-        */
+       /* Locate the connected entity and initialize the encoder. */
        entity = of_graph_get_remote_port_parent(ep->local_node);
        if (!entity) {
                dev_dbg(rcdu->dev, "unconnected endpoint %pOF, skipping\n",
@@ -331,52 +345,17 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
                dev_dbg(rcdu->dev,
                        "connected entity %pOF is disabled, skipping\n",
                        entity);
+               of_node_put(entity);
                return -ENODEV;
        }
 
-       entity_ep_node = of_graph_get_remote_endpoint(ep->local_node);
-
-       for_each_endpoint_of_node(entity, ep_node) {
-               if (ep_node == entity_ep_node)
-                       continue;
-
-               /*
-                * We've found one endpoint other than the input, this must
-                * be an encoder. Locate the connector.
-                */
-               encoder = entity;
-               connector = of_graph_get_remote_port_parent(ep_node);
-               of_node_put(ep_node);
-
-               if (!connector) {
-                       dev_warn(rcdu->dev,
-                                "no connector for encoder %pOF, skipping\n",
-                                encoder);
-                       of_node_put(entity_ep_node);
-                       of_node_put(encoder);
-                       return -ENODEV;
-               }
-
-               break;
-       }
-
-       of_node_put(entity_ep_node);
-
-       if (!encoder) {
-               dev_warn(rcdu->dev,
-                        "no encoder found for endpoint %pOF, skipping\n",
-                        ep->local_node);
-               return -ENODEV;
-       }
-
-       ret = rcar_du_encoder_init(rcdu, output, encoder, connector);
+       ret = rcar_du_encoder_init(rcdu, output, entity);
        if (ret && ret != -EPROBE_DEFER)
                dev_warn(rcdu->dev,
                         "failed to initialize encoder %pOF on output %u (%d), skipping\n",
-                        encoder, output, ret);
+                        entity, output, ret);
 
-       of_node_put(encoder);
-       of_node_put(connector);
+       of_node_put(entity);
 
        return ret;
 }
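
The simplified lookup above boils down to a short of_graph pattern; a kernel-context sketch under the same assumptions (example_init_remote() is a hypothetical helper, the OF calls are the real APIs):

/* Kernel-context sketch, not a complete driver. */
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_graph.h>

static int example_init_remote(struct device_node *local_ep)
{
	struct device_node *entity;
	int ret = 0;

	/* The remote port parent is the encoder/bridge device node. */
	entity = of_graph_get_remote_port_parent(local_ep);
	if (!entity)
		return -ENODEV;		/* unconnected endpoint, skip */

	if (!of_device_is_available(entity))
		ret = -ENODEV;		/* connected entity disabled, skip */

	/* ... otherwise hand 'entity' to the encoder init here ... */

	of_node_put(entity);		/* balance the get on all paths */
	return ret;
}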
index 579753e04f3b37127cb37046532c29ec3bbce5f6..8bee4e787a0ab4eb06646b56f0ddff92cad130b5 100644 (file)
@@ -7,70 +7,63 @@
 
 /dts-v1/;
 /plugin/;
-/ {
-       fragment@0 {
-               target-path = "/";
-               __overlay__ {
-                       #address-cells = <2>;
-                       #size-cells = <2>;
 
-                       lvds@feb90000 {
-                               compatible = "renesas,r8a7790-lvds";
-                               reg = <0 0xfeb90000 0 0x1c>;
+&{/} {
+       #address-cells = <2>;
+       #size-cells = <2>;
 
-                               ports {
-                                       #address-cells = <1>;
-                                       #size-cells = <0>;
+       lvds@feb90000 {
+               compatible = "renesas,r8a7790-lvds";
+               reg = <0 0xfeb90000 0 0x1c>;
 
-                                       port@0 {
-                                               reg = <0>;
-                                               lvds0_input: endpoint {
-                                               };
-                                       };
-                                       port@1 {
-                                               reg = <1>;
-                                               lvds0_out: endpoint {
-                                               };
-                                       };
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+                               lvds0_input: endpoint {
                                };
                        };
-
-                       lvds@feb94000 {
-                               compatible = "renesas,r8a7790-lvds";
-                               reg = <0 0xfeb94000 0 0x1c>;
-
-                               ports {
-                                       #address-cells = <1>;
-                                       #size-cells = <0>;
-
-                                       port@0 {
-                                               reg = <0>;
-                                               lvds1_input: endpoint {
-                                               };
-                                       };
-                                       port@1 {
-                                               reg = <1>;
-                                               lvds1_out: endpoint {
-                                               };
-                                       };
+                       port@1 {
+                               reg = <1>;
+                               lvds0_out: endpoint {
                                };
                        };
                };
        };
 
-       fragment@1 {
-               target-path = "/display@feb00000/ports";
-               __overlay__ {
-                       port@1 {
-                               endpoint {
-                                       remote-endpoint = <&lvds0_input>;
+       lvds@feb94000 {
+               compatible = "renesas,r8a7790-lvds";
+               reg = <0 0xfeb94000 0 0x1c>;
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+                               lvds1_input: endpoint {
                                };
                        };
-                       port@2 {
-                               endpoint {
-                                       remote-endpoint = <&lvds1_input>;
+                       port@1 {
+                               reg = <1>;
+                               lvds1_out: endpoint {
                                };
                        };
                };
        };
 };
+
+&{/display@feb00000/ports} {
+       port@1 {
+               endpoint {
+                       remote-endpoint = <&lvds0_input>;
+               };
+       };
+       port@2 {
+               endpoint {
+                       remote-endpoint = <&lvds1_input>;
+               };
+       };
+};
index cb9da1f3942b5b30e4e731e659afddd7d562e566..92c0509971ec3df5aada19718020170253857f73 100644 (file)
@@ -7,44 +7,37 @@
 
 /dts-v1/;
 /plugin/;
-/ {
-       fragment@0 {
-               target-path = "/";
-               __overlay__ {
-                       #address-cells = <2>;
-                       #size-cells = <2>;
 
-                       lvds@feb90000 {
-                               compatible = "renesas,r8a7791-lvds";
-                               reg = <0 0xfeb90000 0 0x1c>;
+&{/} {
+       #address-cells = <2>;
+       #size-cells = <2>;
 
-                               ports {
-                                       #address-cells = <1>;
-                                       #size-cells = <0>;
+       lvds@feb90000 {
+               compatible = "renesas,r8a7791-lvds";
+               reg = <0 0xfeb90000 0 0x1c>;
 
-                                       port@0 {
-                                               reg = <0>;
-                                               lvds0_input: endpoint {
-                                               };
-                                       };
-                                       port@1 {
-                                               reg = <1>;
-                                               lvds0_out: endpoint {
-                                               };
-                                       };
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+                               lvds0_input: endpoint {
                                };
                        };
-               };
-       };
-
-       fragment@1 {
-               target-path = "/display@feb00000/ports";
-               __overlay__ {
                        port@1 {
-                               endpoint {
-                                       remote-endpoint = <&lvds0_input>;
+                               reg = <1>;
+                               lvds0_out: endpoint {
                                };
                        };
                };
        };
 };
+
+&{/display@feb00000/ports} {
+       port@1 {
+               endpoint {
+                       remote-endpoint = <&lvds0_input>;
+               };
+       };
+};
index e7b8804dc3c1a1c5568340162fe2689c8c21a601..c8b93f21de0fbb8ed456e216aec54c463485032c 100644 (file)
@@ -7,44 +7,37 @@
 
 /dts-v1/;
 /plugin/;
-/ {
-       fragment@0 {
-               target-path = "/";
-               __overlay__ {
-                       #address-cells = <2>;
-                       #size-cells = <2>;
 
-                       lvds@feb90000 {
-                               compatible = "renesas,r8a7793-lvds";
-                               reg = <0 0xfeb90000 0 0x1c>;
+&{/} {
+       #address-cells = <2>;
+       #size-cells = <2>;
 
-                               ports {
-                                       #address-cells = <1>;
-                                       #size-cells = <0>;
+       lvds@feb90000 {
+               compatible = "renesas,r8a7793-lvds";
+               reg = <0 0xfeb90000 0 0x1c>;
 
-                                       port@0 {
-                                               reg = <0>;
-                                               lvds0_input: endpoint {
-                                               };
-                                       };
-                                       port@1 {
-                                               reg = <1>;
-                                               lvds0_out: endpoint {
-                                               };
-                                       };
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+                               lvds0_input: endpoint {
                                };
                        };
-               };
-       };
-
-       fragment@1 {
-               target-path = "/display@feb00000/ports";
-               __overlay__ {
                        port@1 {
-                               endpoint {
-                                       remote-endpoint = <&lvds0_input>;
+                               reg = <1>;
+                               lvds0_out: endpoint {
                                };
                        };
                };
        };
 };
+
+&{/display@feb00000/ports} {
+       port@1 {
+               endpoint {
+                       remote-endpoint = <&lvds0_input>;
+               };
+       };
+};
index a1327443e6fa4d310e7c883e8885a32d9935cd03..16c2d03cb016c4701a88a81473b1a01c0e26cbec 100644 (file)
@@ -7,44 +7,37 @@
 
 /dts-v1/;
 /plugin/;
-/ {
-       fragment@0 {
-               target-path = "/soc";
-               __overlay__ {
-                       #address-cells = <2>;
-                       #size-cells = <2>;
 
-                       lvds@feb90000 {
-                               compatible = "renesas,r8a7795-lvds";
-                               reg = <0 0xfeb90000 0 0x14>;
+&{/soc} {
+       #address-cells = <2>;
+       #size-cells = <2>;
 
-                               ports {
-                                       #address-cells = <1>;
-                                       #size-cells = <0>;
+       lvds@feb90000 {
+               compatible = "renesas,r8a7795-lvds";
+               reg = <0 0xfeb90000 0 0x14>;
 
-                                       port@0 {
-                                               reg = <0>;
-                                               lvds0_input: endpoint {
-                                               };
-                                       };
-                                       port@1 {
-                                               reg = <1>;
-                                               lvds0_out: endpoint {
-                                               };
-                                       };
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+                               lvds0_input: endpoint {
+                               };
+                       };
+                       port@1 {
+                               reg = <1>;
+                               lvds0_out: endpoint {
                                };
                        };
                };
        };
+};
 
-       fragment@1 {
-               target-path = "/soc/display@feb00000/ports";
-               __overlay__ {
-                       port@3 {
-                               endpoint {
-                                       remote-endpoint = <&lvds0_input>;
-                               };
-                       };
+&{/soc/display@feb00000/ports} {
+       port@3 {
+               endpoint {
+                       remote-endpoint = <&lvds0_input>;
                };
        };
 };
index b23d6466c4152b6f7c1c8cab4491e08aa71c6a57..680e923ac036c9321d8cc07115381cf52760ae28 100644 (file)
@@ -7,44 +7,37 @@
 
 /dts-v1/;
 /plugin/;
-/ {
-       fragment@0 {
-               target-path = "/soc";
-               __overlay__ {
-                       #address-cells = <2>;
-                       #size-cells = <2>;
 
-                       lvds@feb90000 {
-                               compatible = "renesas,r8a7796-lvds";
-                               reg = <0 0xfeb90000 0 0x14>;
+&{/soc} {
+       #address-cells = <2>;
+       #size-cells = <2>;
 
-                               ports {
-                                       #address-cells = <1>;
-                                       #size-cells = <0>;
+       lvds@feb90000 {
+               compatible = "renesas,r8a7796-lvds";
+               reg = <0 0xfeb90000 0 0x14>;
 
-                                       port@0 {
-                                               reg = <0>;
-                                               lvds0_input: endpoint {
-                                               };
-                                       };
-                                       port@1 {
-                                               reg = <1>;
-                                               lvds0_out: endpoint {
-                                               };
-                                       };
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+                               lvds0_input: endpoint {
+                               };
+                       };
+                       port@1 {
+                               reg = <1>;
+                               lvds0_out: endpoint {
                                };
                        };
                };
        };
+};
 
-       fragment@1 {
-               target-path = "/soc/display@feb00000/ports";
-               __overlay__ {
-                       port@3 {
-                               endpoint {
-                                       remote-endpoint = <&lvds0_input>;
-                               };
-                       };
+&{/soc/display@feb00000/ports} {
+       port@3 {
+               endpoint {
+                       remote-endpoint = <&lvds0_input>;
                };
        };
 };
index 39d5ae3fdf72b1de5ffa079751b378a080a2087d..321ac80b48d3ebd7c524b9730f23b0518dd80927 100644 (file)
@@ -7,11 +7,9 @@
  * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
  */
 
-#include <drm/drmP.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_plane_helper.h>
index 2f223a4c1d3344c7d8f05452b07fe57d856a86f7..81bbf207ad0edd4045ed5423c99b91f0b1696ba0 100644 (file)
@@ -10,8 +10,7 @@
 #ifndef __RCAR_DU_PLANE_H__
 #define __RCAR_DU_PLANE_H__
 
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
+#include <drm/drm_plane.h>
 
 struct rcar_du_format_info;
 struct rcar_du_group;
index 4576119e7777f5e4105c0b3a7d43b93fd59982ce..76a39eee7c9cf527a1900c0af4e42e7feced9923 100644 (file)
@@ -7,10 +7,8 @@
  * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
  */
 
-#include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
index e8c14dc5cb93f2aba129c363a9c47af88fb931a3..db232037f24a0394151d029676ab41fba363db4b 100644 (file)
@@ -10,8 +10,7 @@
 #ifndef __RCAR_DU_VSP_H__
 #define __RCAR_DU_VSP_H__
 
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
+#include <drm/drm_plane.h>
 
 struct rcar_du_format_info;
 struct rcar_du_vsp;
index 75490a3e0a2ab096b63e6b269b67c26bfe25cf23..452461dc96f28bb7028e4cd9cefaf6e3e45cbff1 100644 (file)
@@ -7,10 +7,12 @@
  * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
  */
 
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 
 #include <drm/bridge/dw_hdmi.h>
+#include <drm/drm_modes.h>
 
 #define RCAR_HDMI_PHY_OPMODE_PLLCFG    0x06    /* Mode of operation and PLL dividers */
 #define RCAR_HDMI_PHY_PLLCURRGMPCTRL   0x10    /* PLL current and Gmp (conductance) */
@@ -35,6 +37,20 @@ static const struct rcar_hdmi_phy_params rcar_hdmi_phy_params[] = {
        { ~0UL,      0x0000, 0x0000, 0x0000 },
 };
 
+static enum drm_mode_status
+rcar_hdmi_mode_valid(struct drm_connector *connector,
+                    const struct drm_display_mode *mode)
+{
+       /*
+        * The maximum supported clock frequency is 297 MHz, as shown in the PHY
+        * parameters table.
+        */
+       if (mode->clock > 297000)
+               return MODE_CLOCK_HIGH;
+
+       return MODE_OK;
+}
+
 static int rcar_hdmi_phy_configure(struct dw_hdmi *hdmi,
                                   const struct dw_hdmi_plat_data *pdata,
                                   unsigned long mpixelclock)
@@ -59,6 +75,7 @@ static int rcar_hdmi_phy_configure(struct dw_hdmi *hdmi,
 }
 
 static const struct dw_hdmi_plat_data rcar_dw_hdmi_plat_data = {
+       .mode_valid = rcar_hdmi_mode_valid,
        .configure_phy  = rcar_hdmi_phy_configure,
 };
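
For reference, the 297 MHz cutoff in rcar_hdmi_mode_valid() sorts common CEA modes as follows; a standalone sketch with well-known pixel clocks (in kHz, as stored in struct drm_display_mode):

/* Standalone sketch: which common modes pass the 297 MHz limit. */
#include <stdio.h>

int main(void)
{
	struct { const char *name; int clock; } modes[] = {
		{ "1920x1080@60", 148500 },
		{ "3840x2160@30", 297000 },
		{ "3840x2160@60", 594000 },
	};

	for (int i = 0; i < 3; i++)
		printf("%s: %s\n", modes[i].name,
		       modes[i].clock > 297000 ? "MODE_CLOCK_HIGH"
					       : "MODE_OK");
	return 0;
}

Only 4K at 60 Hz exceeds the limit; 4K at 30 Hz sits exactly on it and is still accepted.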
 
index 534a128a869d51e438ed5e5c7172ec306ef5a025..7ef97b2a6edaa63a2c8b0f4638c1e5f18f4f2dc5 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/io.h>
+#include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_graph.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_bridge.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
 
+#include "rcar_lvds.h"
 #include "rcar_lvds_regs.h"
 
 struct rcar_lvds;
@@ -182,8 +184,9 @@ struct pll_info {
 
 static void rcar_lvds_d3_e3_pll_calc(struct rcar_lvds *lvds, struct clk *clk,
                                     unsigned long target, struct pll_info *pll,
-                                    u32 clksel)
+                                    u32 clksel, bool dot_clock_only)
 {
+       unsigned int div7 = dot_clock_only ? 1 : 7;
        unsigned long output;
        unsigned long fin;
        unsigned int m_min;
@@ -217,9 +220,9 @@ static void rcar_lvds_d3_e3_pll_calc(struct rcar_lvds *lvds, struct clk *clk,
         *                     `------------> | |
         *                                    |/
         *
-        * The /7 divider is optional when the LVDS PLL is used to generate a
-        * dot clock for the DU RGB output, without using the LVDS encoder. We
-        * don't support this configuration yet.
+        * The /7 divider is optional: it is enabled when the LVDS PLL is used
+        * to drive the LVDS encoder, and disabled when used to generate a dot
+        * clock for the DU RGB output, without using the LVDS encoder.
         *
         * The PLL allowed input frequency range is 12 MHz to 192 MHz.
         */
@@ -279,7 +282,7 @@ static void rcar_lvds_d3_e3_pll_calc(struct rcar_lvds *lvds, struct clk *clk,
                                 * the PLL, followed by an optional fixed /7
                                 * divider.
                                 */
-                               fout = fvco / (1 << e) / 7;
+                               fout = fvco / (1 << e) / div7;
                                div = DIV_ROUND_CLOSEST(fout, target);
                                diff = abs(fout / div - target);
 
@@ -300,7 +303,7 @@ static void rcar_lvds_d3_e3_pll_calc(struct rcar_lvds *lvds, struct clk *clk,
 
 done:
        output = fin * pll->pll_n / pll->pll_m / (1 << pll->pll_e)
-              / 7 / pll->div;
+              / div7 / pll->div;
        error = (long)(output - target) * 10000 / (long)target;
 
        dev_dbg(lvds->dev,
@@ -310,17 +313,18 @@ done:
                pll->pll_m, pll->pll_n, pll->pll_e, pll->div);
 }
 
-static void rcar_lvds_pll_setup_d3_e3(struct rcar_lvds *lvds, unsigned int freq)
+static void __rcar_lvds_pll_setup_d3_e3(struct rcar_lvds *lvds,
+                                       unsigned int freq, bool dot_clock_only)
 {
        struct pll_info pll = { .diff = (unsigned long)-1 };
        u32 lvdpllcr;
 
        rcar_lvds_d3_e3_pll_calc(lvds, lvds->clocks.dotclkin[0], freq, &pll,
-                                LVDPLLCR_CKSEL_DU_DOTCLKIN(0));
+                                LVDPLLCR_CKSEL_DU_DOTCLKIN(0), dot_clock_only);
        rcar_lvds_d3_e3_pll_calc(lvds, lvds->clocks.dotclkin[1], freq, &pll,
-                                LVDPLLCR_CKSEL_DU_DOTCLKIN(1));
+                                LVDPLLCR_CKSEL_DU_DOTCLKIN(1), dot_clock_only);
        rcar_lvds_d3_e3_pll_calc(lvds, lvds->clocks.extal, freq, &pll,
-                                LVDPLLCR_CKSEL_EXTAL);
+                                LVDPLLCR_CKSEL_EXTAL, dot_clock_only);
 
        lvdpllcr = LVDPLLCR_PLLON | pll.clksel | LVDPLLCR_CLKOUT
                 | LVDPLLCR_PLLN(pll.pll_n - 1) | LVDPLLCR_PLLM(pll.pll_m - 1);
@@ -329,6 +333,9 @@ static void rcar_lvds_pll_setup_d3_e3(struct rcar_lvds *lvds, unsigned int freq)
                lvdpllcr |= LVDPLLCR_STP_CLKOUTE | LVDPLLCR_OUTCLKSEL
                         |  LVDPLLCR_PLLE(pll.pll_e - 1);
 
+       if (dot_clock_only)
+               lvdpllcr |= LVDPLLCR_OCKSEL;
+
        rcar_lvds_write(lvds, LVDPLLCR, lvdpllcr);
 
        if (pll.div > 1)
@@ -342,6 +349,57 @@ static void rcar_lvds_pll_setup_d3_e3(struct rcar_lvds *lvds, unsigned int freq)
                rcar_lvds_write(lvds, LVDDIV, 0);
 }
 
+static void rcar_lvds_pll_setup_d3_e3(struct rcar_lvds *lvds, unsigned int freq)
+{
+       __rcar_lvds_pll_setup_d3_e3(lvds, freq, false);
+}
+
+/* -----------------------------------------------------------------------------
+ * Clock - D3/E3 only
+ */
+
+int rcar_lvds_clk_enable(struct drm_bridge *bridge, unsigned long freq)
+{
+       struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
+       int ret;
+
+       if (WARN_ON(!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)))
+               return -ENODEV;
+
+       dev_dbg(lvds->dev, "enabling LVDS PLL, freq=%luHz\n", freq);
+
+       WARN_ON(lvds->enabled);
+
+       ret = clk_prepare_enable(lvds->clocks.mod);
+       if (ret < 0)
+               return ret;
+
+       __rcar_lvds_pll_setup_d3_e3(lvds, freq, true);
+
+       lvds->enabled = true;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rcar_lvds_clk_enable);
+
+void rcar_lvds_clk_disable(struct drm_bridge *bridge)
+{
+       struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
+
+       if (WARN_ON(!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)))
+               return;
+
+       dev_dbg(lvds->dev, "disabling LVDS PLL\n");
+
+       WARN_ON(!lvds->enabled);
+
+       rcar_lvds_write(lvds, LVDPLLCR, 0);
+
+       clk_disable_unprepare(lvds->clocks.mod);
+
+       lvds->enabled = false;
+}
+EXPORT_SYMBOL_GPL(rcar_lvds_clk_disable);
+
 /* -----------------------------------------------------------------------------
  * Bridge
  */
@@ -520,8 +578,8 @@ static void rcar_lvds_get_lvds_mode(struct rcar_lvds *lvds)
 }
 
 static void rcar_lvds_mode_set(struct drm_bridge *bridge,
-                              struct drm_display_mode *mode,
-                              struct drm_display_mode *adjusted_mode)
+                              const struct drm_display_mode *mode,
+                              const struct drm_display_mode *adjusted_mode)
 {
        struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
 
@@ -544,7 +602,10 @@ static int rcar_lvds_attach(struct drm_bridge *bridge)
                return drm_bridge_attach(bridge->encoder, lvds->next_bridge,
                                         bridge);
 
-       /* Otherwise we have a panel, create a connector. */
+       /* Otherwise if we have a panel, create a connector. */
+       if (!lvds->panel)
+               return 0;
+
        ret = drm_connector_init(bridge->dev, connector, &rcar_lvds_conn_funcs,
                                 DRM_MODE_CONNECTOR_LVDS);
        if (ret < 0)
@@ -592,7 +653,8 @@ static int rcar_lvds_parse_dt(struct rcar_lvds *lvds)
        local_output = of_graph_get_endpoint_by_regs(lvds->dev->of_node, 1, 0);
        if (!local_output) {
                dev_dbg(lvds->dev, "unconnected port@1\n");
-               return -ENODEV;
+               ret = -ENODEV;
+               goto done;
        }
 
        /*
@@ -642,6 +704,15 @@ done:
        of_node_put(remote_input);
        of_node_put(remote);
 
+       /*
+        * On D3/E3 the LVDS encoder provides a clock to the DU, which can be
+        * used for the DPAD output even when the LVDS output is not connected.
+        * Don't fail probe in that case as the DU will need the bridge to
+        * control the clock.
+        */
+       if (lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)
+               return ret == -ENODEV ? 0 : ret;
+
        return ret;
 }
 
@@ -785,6 +856,8 @@ static const struct rcar_lvds_device_info rcar_lvds_r8a77995_info = {
 
 static const struct of_device_id rcar_lvds_of_table[] = {
        { .compatible = "renesas,r8a7743-lvds", .data = &rcar_lvds_gen2_info },
+       { .compatible = "renesas,r8a7744-lvds", .data = &rcar_lvds_gen2_info },
+       { .compatible = "renesas,r8a774c0-lvds", .data = &rcar_lvds_r8a77990_info },
        { .compatible = "renesas,r8a7790-lvds", .data = &rcar_lvds_r8a7790_info },
        { .compatible = "renesas,r8a7791-lvds", .data = &rcar_lvds_gen2_info },
        { .compatible = "renesas,r8a7793-lvds", .data = &rcar_lvds_gen2_info },
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.h b/drivers/gpu/drm/rcar-du/rcar_lvds.h
new file mode 100644 (file)
index 0000000..a709cae
--- /dev/null
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * rcar_lvds.h  --  R-Car LVDS Encoder
+ *
+ * Copyright (C) 2013-2018 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#ifndef __RCAR_LVDS_H__
+#define __RCAR_LVDS_H__
+
+struct drm_bridge;
+
+#if IS_ENABLED(CONFIG_DRM_RCAR_LVDS)
+int rcar_lvds_clk_enable(struct drm_bridge *bridge, unsigned long freq);
+void rcar_lvds_clk_disable(struct drm_bridge *bridge);
+#else
+static inline int rcar_lvds_clk_enable(struct drm_bridge *bridge,
+                                      unsigned long freq)
+{
+       return -ENOSYS;
+}
+static inline void rcar_lvds_clk_disable(struct drm_bridge *bridge) { }
+#endif /* CONFIG_DRM_RCAR_LVDS */
+
+#endif /* __RCAR_LVDS_H__ */
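
A minimal kernel-context sketch of how a DU-side caller might use this new interface; only rcar_lvds_clk_enable() and rcar_lvds_clk_disable() come from the header above, the example_* wrappers are hypothetical:

/* Kernel-context sketch of a hypothetical DU-side caller. */
#include <drm/drm_bridge.h>
#include "rcar_lvds.h"

static int example_enable_dpad_clock(struct drm_bridge *lvds_bridge,
				     unsigned long dotclock)
{
	int ret;

	/* Ask the LVDS PLL for a dot clock, without the /7 LVDS divider. */
	ret = rcar_lvds_clk_enable(lvds_bridge, dotclock);
	if (ret < 0)
		return ret;	/* -ENOSYS when the LVDS driver is disabled */

	/* ... start the CRTC using the externally provided clock ... */
	return 0;
}

static void example_disable_dpad_clock(struct drm_bridge *lvds_bridge)
{
	/* ... stop the CRTC first ... */
	rcar_lvds_clk_disable(lvds_bridge);
}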
index 080f0535219502306774ea799c10cc30793006ca..bc44236242098f7c387b0304faf06e28cb692993 100644 (file)
 #include <linux/clk.h>
 
 #include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_dp_helper.h>
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
 
 #include <video/of_videomode.h>
 #include <video/videomode.h>
index 8ad0d773dc33a63c09a8bc246b4ceb7920e10f94..f7b9d45aa1d67bb4a598159c7a85b2af6b06f61c 100644 (file)
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_dp_helper.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
 
 #include <linux/clk.h>
 #include <linux/component.h>
index f57e296401b8906b5bd0fb0fb259cd92d391009c..48fef95cb3c6d442f33fd6f120d4769c3c477d5d 100644 (file)
@@ -16,9 +16,9 @@
 #define _CDN_DP_CORE_H
 
 #include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_dp_helper.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
 #include "rockchip_drm_drv.h"
 
 #define MAX_PHY                2
index 5a485489a1e23cca8b3c455d368d42320140774e..6c8b14fb1d2f3c11ddefff385bff3a070f99ba3b 100644 (file)
@@ -113,7 +113,7 @@ static int cdp_dp_mailbox_write(struct cdn_dp_device *dp, u8 val)
 
 static int cdn_dp_mailbox_validate_receive(struct cdn_dp_device *dp,
                                           u8 module_id, u8 opcode,
-                                          u8 req_size)
+                                          u16 req_size)
 {
        u32 mbox_size, i;
        u8 header[4];
index 7ee359bcee62521b5e83ef2533fcb065998440b9..ef8486e5e2cd121681a0710e4b5b41e06f24bd50 100644 (file)
@@ -467,7 +467,7 @@ static int dw_mipi_dsi_phy_init(void *priv_data)
 }
 
 static int
-dw_mipi_dsi_get_lane_mbps(void *priv_data, struct drm_display_mode *mode,
+dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
                          unsigned long mode_flags, u32 lanes, u32 format,
                          unsigned int *lane_mbps)
 {
index 89c63cfde5c85073de5603cc8a2be3253f7bfb82..4cdc9f86c2e5b53bd6ef39e104adb5a3780010ec 100644 (file)
@@ -16,8 +16,8 @@
 
 #include <drm/drm_of.h>
 #include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/bridge/dw_hdmi.h>
 
 #include "rockchip_drm_drv.h"
index 1c02b3e61299c800549519a9f065ac2691f665fc..ce1545862b6ce433a3ef97e82a978904c27e4a9a 100644 (file)
@@ -26,8 +26,8 @@
 #include <drm/drm_of.h>
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
+#include <drm/drm_probe_helper.h>
 
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_vop.h"
@@ -295,7 +295,9 @@ static int inno_hdmi_config_video_avi(struct inno_hdmi *hdmi,
        union hdmi_infoframe frame;
        int rc;
 
-       rc = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode, false);
+       rc = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+                                                     &hdmi->connector,
+                                                     mode);
 
        if (hdmi->hdmi_data.enc_out_format == HDMI_COLORSPACE_YUV444)
                frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
index be6c2573039af0c82749828100b48fef5c8dc75c..d7fa17f127695a81d3c6fd0390d67aa244e6a498 100644 (file)
  */
 
 #include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
 #include <linux/dma-mapping.h>
 #include <linux/dma-iommu.h>
 #include <linux/pm_runtime.h>
index ea18cb2a76c0dc7c7e9052307dda301743a607c2..97438bbbe3892c5b231df6e457b66509e225f7dc 100644 (file)
@@ -17,8 +17,8 @@
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_fb_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_fb.h"
@@ -127,42 +127,6 @@ err_gem_object_unreference:
        return ERR_PTR(ret);
 }
 
-static void
-rockchip_drm_psr_inhibit_get_state(struct drm_atomic_state *state)
-{
-       struct drm_crtc *crtc;
-       struct drm_crtc_state *crtc_state;
-       struct drm_encoder *encoder;
-       u32 encoder_mask = 0;
-       int i;
-
-       for_each_old_crtc_in_state(state, crtc, crtc_state, i) {
-               encoder_mask |= crtc_state->encoder_mask;
-               encoder_mask |= crtc->state->encoder_mask;
-       }
-
-       drm_for_each_encoder_mask(encoder, state->dev, encoder_mask)
-               rockchip_drm_psr_inhibit_get(encoder);
-}
-
-static void
-rockchip_drm_psr_inhibit_put_state(struct drm_atomic_state *state)
-{
-       struct drm_crtc *crtc;
-       struct drm_crtc_state *crtc_state;
-       struct drm_encoder *encoder;
-       u32 encoder_mask = 0;
-       int i;
-
-       for_each_old_crtc_in_state(state, crtc, crtc_state, i) {
-               encoder_mask |= crtc_state->encoder_mask;
-               encoder_mask |= crtc->state->encoder_mask;
-       }
-
-       drm_for_each_encoder_mask(encoder, state->dev, encoder_mask)
-               rockchip_drm_psr_inhibit_put(encoder);
-}
-
 static void
 rockchip_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state)
 {
index e6650553f5d6cdc93f706dbef2482efe308dac19..8ce68bd508bed9aba495c2d523b4b08169db272a 100644 (file)
@@ -15,7 +15,7 @@
 #include <drm/drm.h>
 #include <drm/drmP.h>
 #include <drm/drm_fb_helper.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_gem.h"
@@ -91,7 +91,6 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
        }
 
        fbi->par = helper;
-       fbi->flags = FBINFO_FLAG_DEFAULT;
        fbi->fbops = &rockchip_drm_fbdev_ops;
 
        fb = helper->fb;
index 01ff3c8588750ea466be307a1f820f4ed73f2fff..a0c8bd235b675b26ff7192367b36d05463479b41 100644 (file)
@@ -13,7 +13,8 @@
  */
 
 #include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_probe_helper.h>
 
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_psr.h"
@@ -109,6 +110,42 @@ int rockchip_drm_psr_inhibit_put(struct drm_encoder *encoder)
 }
 EXPORT_SYMBOL(rockchip_drm_psr_inhibit_put);
 
+void rockchip_drm_psr_inhibit_get_state(struct drm_atomic_state *state)
+{
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *crtc_state;
+       struct drm_encoder *encoder;
+       u32 encoder_mask = 0;
+       int i;
+
+       for_each_old_crtc_in_state(state, crtc, crtc_state, i) {
+               encoder_mask |= crtc_state->encoder_mask;
+               encoder_mask |= crtc->state->encoder_mask;
+       }
+
+       drm_for_each_encoder_mask(encoder, state->dev, encoder_mask)
+               rockchip_drm_psr_inhibit_get(encoder);
+}
+EXPORT_SYMBOL(rockchip_drm_psr_inhibit_get_state);
+
+void rockchip_drm_psr_inhibit_put_state(struct drm_atomic_state *state)
+{
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *crtc_state;
+       struct drm_encoder *encoder;
+       u32 encoder_mask = 0;
+       int i;
+
+       for_each_old_crtc_in_state(state, crtc, crtc_state, i) {
+               encoder_mask |= crtc_state->encoder_mask;
+               encoder_mask |= crtc->state->encoder_mask;
+       }
+
+       drm_for_each_encoder_mask(encoder, state->dev, encoder_mask)
+               rockchip_drm_psr_inhibit_put(encoder);
+}
+EXPORT_SYMBOL(rockchip_drm_psr_inhibit_put_state);
+
 /**
  * rockchip_drm_psr_inhibit_get - acquire PSR inhibit on given encoder
  * @encoder: encoder to obtain the PSR encoder
index 860c62494496e67665267ac5fccba76040fa46fb..25350ba3237b6648cec2a751b2b04bd72cefa21e 100644 (file)
@@ -20,6 +20,9 @@ void rockchip_drm_psr_flush_all(struct drm_device *dev);
 int rockchip_drm_psr_inhibit_put(struct drm_encoder *encoder);
 int rockchip_drm_psr_inhibit_get(struct drm_encoder *encoder);
 
+void rockchip_drm_psr_inhibit_get_state(struct drm_atomic_state *state);
+void rockchip_drm_psr_inhibit_put_state(struct drm_atomic_state *state);
+
 int rockchip_drm_psr_register(struct drm_encoder *encoder,
                        int (*psr_set)(struct drm_encoder *, bool enable));
 void rockchip_drm_psr_unregister(struct drm_encoder *encoder);
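
The two new *_state helpers are meant to bracket direct hardware access, as the VOP asynchronous plane update further down does; a kernel-context sketch (example_direct_update() is a hypothetical placeholder):

/* Kernel-context sketch: force PSR exit around a direct register update. */
#include <drm/drm_atomic.h>
#include "rockchip_drm_psr.h"

static void example_direct_update(struct drm_atomic_state *state)
{
	/* Take an inhibit reference on every encoder in the state,
	 * forcing PSR exit before the hardware is touched. */
	rockchip_drm_psr_inhibit_get_state(state);

	/* ... program VOP registers directly ... */

	/* Drop the references; PSR may be re-entered afterwards. */
	rockchip_drm_psr_inhibit_put_state(state);
}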
index db8358e6d230a727a5d44b139e45897b6d6de156..c7d4c6073ea59b70c56559288def3fb7fd6fe215 100644 (file)
 #include <drm/drm.h>
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
+#include <drm/drm_atomic_uapi.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_flip_work.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
 #ifdef CONFIG_DRM_ANALOGIX_DP
 #include <drm/bridge/analogix_dp.h>
 #endif
 #include "rockchip_drm_vop.h"
 #include "rockchip_rgb.h"
 
-#define VOP_WIN_SET(x, win, name, v) \
+#define VOP_WIN_SET(vop, win, name, v) \
                vop_reg_set(vop, &win->phy->name, win->base, ~0, v, #name)
-#define VOP_SCL_SET(x, win, name, v) \
+#define VOP_SCL_SET(vop, win, name, v) \
                vop_reg_set(vop, &win->phy->scl->name, win->base, ~0, v, #name)
-#define VOP_SCL_SET_EXT(x, win, name, v) \
+#define VOP_SCL_SET_EXT(vop, win, name, v) \
                vop_reg_set(vop, &win->phy->scl->ext->name, \
                            win->base, ~0, v, #name)
 
+#define VOP_WIN_YUV2YUV_SET(vop, win_yuv2yuv, name, v) \
+       do { \
+               if (win_yuv2yuv && win_yuv2yuv->name.mask) \
+                       vop_reg_set(vop, &win_yuv2yuv->name, 0, ~0, v, #name); \
+       } while (0)
+
+#define VOP_WIN_YUV2YUV_COEFFICIENT_SET(vop, win_yuv2yuv, name, v) \
+       do { \
+               if (win_yuv2yuv && win_yuv2yuv->phy->name.mask) \
+                       vop_reg_set(vop, &win_yuv2yuv->phy->name, win_yuv2yuv->base, ~0, v, #name); \
+       } while (0)
+
 #define VOP_INTR_SET_MASK(vop, name, mask, v) \
                vop_reg_set(vop, &vop->data->intr->name, 0, mask, v, #name)
 
 #define VOP_INTR_GET_TYPE(vop, name, type) \
                vop_get_intr_type(vop, &vop->data->intr->name, type)
 
-#define VOP_WIN_GET(x, win, name) \
-               vop_read_reg(x, win->offset, win->phy->name)
+#define VOP_WIN_GET(vop, win, name) \
+               vop_read_reg(vop, win->offset, win->phy->name)
+
+#define VOP_WIN_HAS_REG(win, name) \
+       (!!(win->phy->name.mask))
 
 #define VOP_WIN_GET_YRGBADDR(vop, win) \
                vop_readl(vop, win->base + win->phy->yrgb_mst.offset)
 #define to_vop(x) container_of(x, struct vop, crtc)
 #define to_vop_win(x) container_of(x, struct vop_win, base)
 
+/*
+ * The coefficients of the following matrix are all fixed-point numbers.
+ * The format is S2.10 for the 3x3 part of the matrix, and S9.12 for the offsets.
+ * They are all represented in two's complement.
+ */
+static const uint32_t bt601_yuv2rgb[] = {
+       0x4A8, 0x0,    0x662,
+       0x4A8, 0x1E6F, 0x1CBF,
+       0x4A8, 0x812,  0x0,
+       0x321168, 0x0877CF, 0x2EB127
+};
+
 enum vop_pending {
        VOP_PENDING_FB_UNREF,
 };
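
The S2.10 encoding above can be checked by hand: with a sign bit, two integer bits and ten fractional bits, 0x4A8 decodes to 1192/1024, about 1.164, and 0x1E6F, negative in 13-bit two's complement, to -401/1024, about -0.392, the usual BT.601 limited-range coefficients. A standalone sketch:

/* Standalone sketch: decode the S2.10 two's-complement entries above. */
#include <stdio.h>
#include <stdint.h>

static double s2_10_to_double(uint32_t raw)
{
	int32_t v = raw & 0x1fff;	/* 13 bits: sign + 2 + 10 */

	if (v & 0x1000)			/* sign bit set? */
		v -= 0x2000;		/* two's-complement extend */
	return v / 1024.0;		/* 10 fractional bits */
}

int main(void)
{
	printf("0x4A8  -> %.3f\n", s2_10_to_double(0x4A8));	/*  1.164 */
	printf("0x662  -> %.3f\n", s2_10_to_double(0x662));	/*  1.596 */
	printf("0x1E6F -> %.3f\n", s2_10_to_double(0x1E6F));	/* -0.392 */
	return 0;
}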
@@ -92,6 +120,7 @@ enum vop_pending {
 struct vop_win {
        struct drm_plane base;
        const struct vop_win_data *data;
+       const struct vop_win_yuv2yuv_data *yuv2yuv_data;
        struct vop *vop;
 };
 
@@ -686,6 +715,11 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
                return -EINVAL;
        }
 
+       if (fb->format->is_yuv && state->rotation & DRM_MODE_REFLECT_Y) {
+               DRM_ERROR("Invalid Source: Yuv format does not support this rotation\n");
+               return -EINVAL;
+       }
+
        return 0;
 }
 
@@ -713,6 +747,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
        struct drm_crtc *crtc = state->crtc;
        struct vop_win *vop_win = to_vop_win(plane);
        const struct vop_win_data *win = vop_win->data;
+       const struct vop_win_yuv2yuv_data *win_yuv2yuv = vop_win->yuv2yuv_data;
        struct vop *vop = to_vop(state->crtc);
        struct drm_framebuffer *fb = state->fb;
        unsigned int actual_w, actual_h;
@@ -728,6 +763,8 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
        bool rb_swap;
        int win_index = VOP_WIN_TO_INDEX(vop_win);
        int format;
+       int is_yuv = fb->format->is_yuv;
+       int i;
 
        /*
         * can't update plane when vop is disabled.
@@ -761,6 +798,13 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
        offset += (src->y1 >> 16) * fb->pitches[0];
        dma_addr = rk_obj->dma_addr + offset + fb->offsets[0];
 
+       /*
+        * For y-mirroring we need to move the address
+        * to the beginning of the last line.
+        */
+       if (state->rotation & DRM_MODE_REFLECT_Y)
+               dma_addr += (actual_h - 1) * fb->pitches[0];
+
        format = vop_convert_format(fb->format->format);
 
        spin_lock(&vop->reg_lock);
@@ -768,7 +812,13 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
        VOP_WIN_SET(vop, win, format, format);
        VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4));
        VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
-       if (fb->format->is_yuv) {
+       VOP_WIN_YUV2YUV_SET(vop, win_yuv2yuv, y2r_en, is_yuv);
+       VOP_WIN_SET(vop, win, y_mir_en,
+                   (state->rotation & DRM_MODE_REFLECT_Y) ? 1 : 0);
+       VOP_WIN_SET(vop, win, x_mir_en,
+                   (state->rotation & DRM_MODE_REFLECT_X) ? 1 : 0);
+
+       if (is_yuv) {
                int hsub = drm_format_horz_chroma_subsampling(fb->format->format);
                int vsub = drm_format_vert_chroma_subsampling(fb->format->format);
                int bpp = fb->format->cpp[1];
@@ -782,6 +832,13 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
                dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];
                VOP_WIN_SET(vop, win, uv_vir, DIV_ROUND_UP(fb->pitches[1], 4));
                VOP_WIN_SET(vop, win, uv_mst, dma_addr);
+
+               for (i = 0; i < NUM_YUV2YUV_COEFFICIENTS; i++) {
+                       VOP_WIN_YUV2YUV_COEFFICIENT_SET(vop,
+                                                       win_yuv2yuv,
+                                                       y2r_coefficients[i],
+                                                       bt601_yuv2rgb[i]);
+               }
        }
 
        if (win->phy->scl)
@@ -820,10 +877,83 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
        spin_unlock(&vop->reg_lock);
 }
 
+static int vop_plane_atomic_async_check(struct drm_plane *plane,
+                                       struct drm_plane_state *state)
+{
+       struct vop_win *vop_win = to_vop_win(plane);
+       const struct vop_win_data *win = vop_win->data;
+       int min_scale = win->phy->scl ? FRAC_16_16(1, 8) :
+                                       DRM_PLANE_HELPER_NO_SCALING;
+       int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
+                                       DRM_PLANE_HELPER_NO_SCALING;
+       struct drm_crtc_state *crtc_state;
+
+       if (plane != state->crtc->cursor)
+               return -EINVAL;
+
+       if (!plane->state)
+               return -EINVAL;
+
+       if (!plane->state->fb)
+               return -EINVAL;
+
+       if (state->state)
+               crtc_state = drm_atomic_get_existing_crtc_state(state->state,
+                                                               state->crtc);
+       else /* Special case for asynchronous cursor updates. */
+               crtc_state = plane->crtc->state;
+
+       return drm_atomic_helper_check_plane_state(plane->state, crtc_state,
+                                                  min_scale, max_scale,
+                                                  true, true);
+}
+
+static void vop_plane_atomic_async_update(struct drm_plane *plane,
+                                         struct drm_plane_state *new_state)
+{
+       struct vop *vop = to_vop(plane->state->crtc);
+       struct drm_plane_state *plane_state;
+
+       plane_state = plane->funcs->atomic_duplicate_state(plane);
+       plane_state->crtc_x = new_state->crtc_x;
+       plane_state->crtc_y = new_state->crtc_y;
+       plane_state->crtc_h = new_state->crtc_h;
+       plane_state->crtc_w = new_state->crtc_w;
+       plane_state->src_x = new_state->src_x;
+       plane_state->src_y = new_state->src_y;
+       plane_state->src_h = new_state->src_h;
+       plane_state->src_w = new_state->src_w;
+
+       if (plane_state->fb != new_state->fb)
+               drm_atomic_set_fb_for_plane(plane_state, new_state->fb);
+
+       swap(plane_state, plane->state);
+
+       if (plane->state->fb && plane->state->fb != new_state->fb) {
+               drm_framebuffer_get(plane->state->fb);
+               WARN_ON(drm_crtc_vblank_get(plane->state->crtc) != 0);
+               drm_flip_work_queue(&vop->fb_unref_work, plane->state->fb);
+               set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
+       }
+
+       if (vop->is_enabled) {
+               rockchip_drm_psr_inhibit_get_state(new_state->state);
+               vop_plane_atomic_update(plane, plane->state);
+               spin_lock(&vop->reg_lock);
+               vop_cfg_done(vop);
+               spin_unlock(&vop->reg_lock);
+               rockchip_drm_psr_inhibit_put_state(new_state->state);
+       }
+
+       plane->funcs->atomic_destroy_state(plane, plane_state);
+}
+
 static const struct drm_plane_helper_funcs plane_helper_funcs = {
        .atomic_check = vop_plane_atomic_check,
        .atomic_update = vop_plane_atomic_update,
        .atomic_disable = vop_plane_atomic_disable,
+       .atomic_async_check = vop_plane_atomic_async_check,
+       .atomic_async_update = vop_plane_atomic_async_update,
        .prepare_fb = drm_gem_fb_prepare_fb,
 };
 
@@ -1274,6 +1404,18 @@ out:
        return ret;
 }
 
+static void vop_plane_add_properties(struct drm_plane *plane,
+                                    const struct vop_win_data *win_data)
+{
+       unsigned int flags = 0;
+
+       flags |= VOP_WIN_HAS_REG(win_data, x_mir_en) ? DRM_MODE_REFLECT_X : 0;
+       flags |= VOP_WIN_HAS_REG(win_data, y_mir_en) ? DRM_MODE_REFLECT_Y : 0;
+       if (flags)
+               drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
+                                                  DRM_MODE_ROTATE_0 | flags);
+}
+
 static int vop_create_crtc(struct vop *vop)
 {
        const struct vop_data *vop_data = vop->data;
@@ -1311,6 +1453,7 @@ static int vop_create_crtc(struct vop *vop)
 
                plane = &vop_win->base;
                drm_plane_helper_add(plane, &plane_helper_funcs);
+               vop_plane_add_properties(plane, win_data);
                if (plane->type == DRM_PLANE_TYPE_PRIMARY)
                        primary = plane;
                else if (plane->type == DRM_PLANE_TYPE_CURSOR)
@@ -1348,6 +1491,7 @@ static int vop_create_crtc(struct vop *vop)
                        goto err_cleanup_crtc;
                }
                drm_plane_helper_add(&vop_win->base, &plane_helper_funcs);
+               vop_plane_add_properties(&vop_win->base, win_data);
        }
 
        port = of_get_child_by_name(dev->of_node, "port");
@@ -1531,6 +1675,9 @@ static void vop_win_init(struct vop *vop)
 
                vop_win->data = win_data;
                vop_win->vop = vop;
+
+               if (vop_data->win_yuv2yuv)
+                       vop_win->yuv2yuv_data = &vop_data->win_yuv2yuv[i];
        }
 }
 
index 0fe40e1983d97bef4d12f9f8e305fdc38767066b..04ed401d2325e6288225aed9a6bbdba37f2c447d 100644 (file)
@@ -23,6 +23,8 @@
 #define VOP_MAJOR(version)             ((version) >> 8)
 #define VOP_MINOR(version)             ((version) & 0xff)
 
+#define NUM_YUV2YUV_COEFFICIENTS 12
+
 enum vop_data_format {
        VOP_FMT_ARGB8888 = 0,
        VOP_FMT_RGB888,
@@ -124,6 +126,10 @@ struct vop_scl_regs {
        struct vop_reg scale_cbcr_y;
 };
 
+struct vop_yuv2yuv_phy {
+       struct vop_reg y2r_coefficients[NUM_YUV2YUV_COEFFICIENTS];
+};
+
 struct vop_win_phy {
        const struct vop_scl_regs *scl;
        const uint32_t *data_formats;
@@ -140,12 +146,20 @@ struct vop_win_phy {
        struct vop_reg uv_mst;
        struct vop_reg yrgb_vir;
        struct vop_reg uv_vir;
+       struct vop_reg y_mir_en;
+       struct vop_reg x_mir_en;
 
        struct vop_reg dst_alpha_ctl;
        struct vop_reg src_alpha_ctl;
        struct vop_reg channel;
 };
 
+struct vop_win_yuv2yuv_data {
+       uint32_t base;
+       const struct vop_yuv2yuv_phy *phy;
+       struct vop_reg y2r_en;
+};
+
 struct vop_win_data {
        uint32_t base;
        const struct vop_win_phy *phy;
@@ -159,6 +173,7 @@ struct vop_data {
        const struct vop_misc *misc;
        const struct vop_modeset *modeset;
        const struct vop_output *output;
+       const struct vop_win_yuv2yuv_data *win_yuv2yuv;
        const struct vop_win_data *win;
        unsigned int win_size;
 
index 456bd9f13baefdc87865e9a7404e4caa727b0b4c..e52dd5a8529e3f18dac8404261bdb4ba7fd16425 100644 (file)
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_dp_helper.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
 
 #include <linux/component.h>
 #include <linux/clk.h>
index 96ac1458a59c945ff11fe9640adcb4444d8e3606..c3dd750c7189b640afcdda4c48cfca24d39aa86a 100644 (file)
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_dp_helper.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
 
 #include <linux/component.h>
 #include <linux/of_graph.h>
index 08fc40af52c89d36b4964fc0714990d2d82b4ea6..bd76328c0fdb5f378ac5b2e91f7f7867db24fdcc 100644 (file)
@@ -299,6 +299,114 @@ static const struct vop_data px30_vop_lit = {
        .win_size = ARRAY_SIZE(px30_vop_lit_win_data),
 };
 
+static const struct vop_scl_regs rk3066_win_scl = {
+       .scale_yrgb_x = VOP_REG(RK3066_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
+       .scale_yrgb_y = VOP_REG(RK3066_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
+       .scale_cbcr_x = VOP_REG(RK3066_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
+       .scale_cbcr_y = VOP_REG(RK3066_WIN0_SCL_FACTOR_CBR, 0xffff, 16),
+};
+
+static const struct vop_win_phy rk3066_win0_data = {
+       .scl = &rk3066_win_scl,
+       .data_formats = formats_win_full,
+       .nformats = ARRAY_SIZE(formats_win_full),
+       .enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 0),
+       .format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 4),
+       .rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 19),
+       .act_info = VOP_REG(RK3066_WIN0_ACT_INFO, 0x1fff1fff, 0),
+       .dsp_info = VOP_REG(RK3066_WIN0_DSP_INFO, 0x0fff0fff, 0),
+       .dsp_st = VOP_REG(RK3066_WIN0_DSP_ST, 0x1fff1fff, 0),
+       .yrgb_mst = VOP_REG(RK3066_WIN0_YRGB_MST0, 0xffffffff, 0),
+       .uv_mst = VOP_REG(RK3066_WIN0_CBR_MST0, 0xffffffff, 0),
+       .yrgb_vir = VOP_REG(RK3066_WIN0_VIR, 0xffff, 0),
+       .uv_vir = VOP_REG(RK3066_WIN0_VIR, 0x1fff, 16),
+};
+
+static const struct vop_win_phy rk3066_win1_data = {
+       .scl = &rk3066_win_scl,
+       .data_formats = formats_win_full,
+       .nformats = ARRAY_SIZE(formats_win_full),
+       .enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 1),
+       .format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 7),
+       .rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 23),
+       .act_info = VOP_REG(RK3066_WIN1_ACT_INFO, 0x1fff1fff, 0),
+       .dsp_info = VOP_REG(RK3066_WIN1_DSP_INFO, 0x0fff0fff, 0),
+       .dsp_st = VOP_REG(RK3066_WIN1_DSP_ST, 0x1fff1fff, 0),
+       .yrgb_mst = VOP_REG(RK3066_WIN1_YRGB_MST, 0xffffffff, 0),
+       .uv_mst = VOP_REG(RK3066_WIN1_CBR_MST, 0xffffffff, 0),
+       .yrgb_vir = VOP_REG(RK3066_WIN1_VIR, 0xffff, 0),
+       .uv_vir = VOP_REG(RK3066_WIN1_VIR, 0x1fff, 16),
+};
+
+static const struct vop_win_phy rk3066_win2_data = {
+       .data_formats = formats_win_lite,
+       .nformats = ARRAY_SIZE(formats_win_lite),
+       .enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 2),
+       .format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 10),
+       .rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 27),
+       .dsp_info = VOP_REG(RK3066_WIN2_DSP_INFO, 0x0fff0fff, 0),
+       .dsp_st = VOP_REG(RK3066_WIN2_DSP_ST, 0x1fff1fff, 0),
+       .yrgb_mst = VOP_REG(RK3066_WIN2_MST, 0xffffffff, 0),
+       .yrgb_vir = VOP_REG(RK3066_WIN2_VIR, 0xffff, 0),
+};
+
+static const struct vop_modeset rk3066_modeset = {
+       .htotal_pw = VOP_REG(RK3066_DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
+       .hact_st_end = VOP_REG(RK3066_DSP_HACT_ST_END, 0x1fff1fff, 0),
+       .vtotal_pw = VOP_REG(RK3066_DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
+       .vact_st_end = VOP_REG(RK3066_DSP_VACT_ST_END, 0x1fff1fff, 0),
+};
+
+static const struct vop_output rk3066_output = {
+       .pin_pol = VOP_REG(RK3066_DSP_CTRL0, 0x7, 4),
+};
+
+static const struct vop_common rk3066_common = {
+       .standby = VOP_REG(RK3066_SYS_CTRL0, 0x1, 1),
+       .out_mode = VOP_REG(RK3066_DSP_CTRL0, 0xf, 0),
+       .cfg_done = VOP_REG(RK3066_REG_CFG_DONE, 0x1, 0),
+       .dsp_blank = VOP_REG(RK3066_DSP_CTRL1, 0x1, 24),
+};
+
+static const struct vop_win_data rk3066_vop_win_data[] = {
+       { .base = 0x00, .phy = &rk3066_win0_data,
+         .type = DRM_PLANE_TYPE_PRIMARY },
+       { .base = 0x00, .phy = &rk3066_win1_data,
+         .type = DRM_PLANE_TYPE_OVERLAY },
+       { .base = 0x00, .phy = &rk3066_win2_data,
+         .type = DRM_PLANE_TYPE_CURSOR },
+};
+
+static const int rk3066_vop_intrs[] = {
+       /*
+        * The hs_start interrupt fires at frame start, so it serves
+        * the same purpose as dsp_hold in this driver.
+        */
+       DSP_HOLD_VALID_INTR,
+       FS_INTR,
+       LINE_FLAG_INTR,
+       BUS_ERROR_INTR,
+};
+
+static const struct vop_intr rk3066_intr = {
+       .intrs = rk3066_vop_intrs,
+       .nintrs = ARRAY_SIZE(rk3066_vop_intrs),
+       .line_flag_num[0] = VOP_REG(RK3066_INT_STATUS, 0xfff, 12),
+       .status = VOP_REG(RK3066_INT_STATUS, 0xf, 0),
+       .enable = VOP_REG(RK3066_INT_STATUS, 0xf, 4),
+       .clear = VOP_REG(RK3066_INT_STATUS, 0xf, 8),
+};
+
+static const struct vop_data rk3066_vop = {
+       .version = VOP_VERSION(2, 1),
+       .intr = &rk3066_intr,
+       .common = &rk3066_common,
+       .modeset = &rk3066_modeset,
+       .output = &rk3066_output,
+       .win = rk3066_vop_win_data,
+       .win_size = ARRAY_SIZE(rk3066_vop_win_data),
+};
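The version field lets shared code branch on the VOP IP revision instead of
growing per-SoC flags. Assuming the usual encoding (major revision in the
high byte, minor in the low byte), a revision check is a sketch like:

    #define VOP_MAJOR(version)      ((version) >> 8)
    #define VOP_MINOR(version)      ((version) & 0xff)

    /* e.g. gate an RK3066-era quirk on pre-3.x hardware */
    static bool vop_is_legacy(const struct vop_data *data)
    {
            return VOP_MAJOR(data->version) < 3;
    }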
+
 static const struct vop_scl_regs rk3188_win_scl = {
        .scale_yrgb_x = VOP_REG(RK3188_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
        .scale_yrgb_y = VOP_REG(RK3188_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
@@ -550,6 +658,27 @@ static const struct vop_intr rk3368_vop_intr = {
        .clear = VOP_REG_MASK_SYNC(RK3368_INTR_CLEAR, 0x3fff, 0),
 };
 
+static const struct vop_win_phy rk3368_win01_data = {
+       .scl = &rk3288_win_full_scl,
+       .data_formats = formats_win_full,
+       .nformats = ARRAY_SIZE(formats_win_full),
+       .enable = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 0),
+       .format = VOP_REG(RK3368_WIN0_CTRL0, 0x7, 1),
+       .rb_swap = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 12),
+       .x_mir_en = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 21),
+       .y_mir_en = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 22),
+       .act_info = VOP_REG(RK3368_WIN0_ACT_INFO, 0x1fff1fff, 0),
+       .dsp_info = VOP_REG(RK3368_WIN0_DSP_INFO, 0x0fff0fff, 0),
+       .dsp_st = VOP_REG(RK3368_WIN0_DSP_ST, 0x1fff1fff, 0),
+       .yrgb_mst = VOP_REG(RK3368_WIN0_YRGB_MST, 0xffffffff, 0),
+       .uv_mst = VOP_REG(RK3368_WIN0_CBR_MST, 0xffffffff, 0),
+       .yrgb_vir = VOP_REG(RK3368_WIN0_VIR, 0x3fff, 0),
+       .uv_vir = VOP_REG(RK3368_WIN0_VIR, 0x3fff, 16),
+       .src_alpha_ctl = VOP_REG(RK3368_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
+       .dst_alpha_ctl = VOP_REG(RK3368_WIN0_DST_ALPHA_CTRL, 0xff, 0),
+       .channel = VOP_REG(RK3368_WIN0_CTRL2, 0xff, 0),
+};
+
 static const struct vop_win_phy rk3368_win23_data = {
        .data_formats = formats_win_lite,
        .nformats = ARRAY_SIZE(formats_win_lite),
@@ -557,6 +686,7 @@ static const struct vop_win_phy rk3368_win23_data = {
        .enable = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 4),
        .format = VOP_REG(RK3368_WIN2_CTRL0, 0x3, 5),
        .rb_swap = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 20),
+       .y_mir_en = VOP_REG(RK3368_WIN2_CTRL1, 0x1, 15),
        .dsp_info = VOP_REG(RK3368_WIN2_DSP_INFO0, 0x0fff0fff, 0),
        .dsp_st = VOP_REG(RK3368_WIN2_DSP_ST0, 0x1fff1fff, 0),
        .yrgb_mst = VOP_REG(RK3368_WIN2_MST0, 0xffffffff, 0),
@@ -566,9 +696,9 @@ static const struct vop_win_phy rk3368_win23_data = {
 };
 
 static const struct vop_win_data rk3368_vop_win_data[] = {
-       { .base = 0x00, .phy = &rk3288_win01_data,
+       { .base = 0x00, .phy = &rk3368_win01_data,
          .type = DRM_PLANE_TYPE_PRIMARY },
-       { .base = 0x40, .phy = &rk3288_win01_data,
+       { .base = 0x40, .phy = &rk3368_win01_data,
          .type = DRM_PLANE_TYPE_OVERLAY },
        { .base = 0x00, .phy = &rk3368_win23_data,
          .type = DRM_PLANE_TYPE_OVERLAY },
@@ -637,6 +767,34 @@ static const struct vop_output rk3399_output = {
        .mipi_dual_channel_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 3),
 };
 
+static const struct vop_yuv2yuv_phy rk3399_yuv2yuv_win01_data = {
+       .y2r_coefficients = {
+               VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 0, 0xffff, 0),
+               VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 0, 0xffff, 16),
+               VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 4, 0xffff, 0),
+               VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 4, 0xffff, 16),
+               VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 8, 0xffff, 0),
+               VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 8, 0xffff, 16),
+               VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 12, 0xffff, 0),
+               VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 12, 0xffff, 16),
+               VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 16, 0xffff, 0),
+               VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 20, 0xffffffff, 0),
+               VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 24, 0xffffffff, 0),
+               VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 28, 0xffffffff, 0),
+       },
+};
+
+static const struct vop_yuv2yuv_phy rk3399_yuv2yuv_win23_data = { };
+
+static const struct vop_win_yuv2yuv_data rk3399_vop_big_win_yuv2yuv_data[] = {
+       { .base = 0x00, .phy = &rk3399_yuv2yuv_win01_data,
+         .y2r_en = VOP_REG(RK3399_YUV2YUV_WIN, 0x1, 1) },
+       { .base = 0x60, .phy = &rk3399_yuv2yuv_win01_data,
+         .y2r_en = VOP_REG(RK3399_YUV2YUV_WIN, 0x1, 9) },
+       { .base = 0xC0, .phy = &rk3399_yuv2yuv_win23_data },
+       { .base = 0x120, .phy = &rk3399_yuv2yuv_win23_data },
+};
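The coefficient table above packs a 3x3 matrix plus offsets into eight 32-bit
registers: the first nine entries are 16-bit matrix coefficients stored two
per word, the last three are full-width offset words. A hedged sketch of
loading such a table, where vop_write_field() stands in for the driver's real
masked-write accessor (not its actual name):

    static void vop_win_load_y2r(struct vop *vop,
                                 const struct vop_win_yuv2yuv_data *win,
                                 const u32 coef[12])
    {
            unsigned int i;

            /* Program the 12 coefficient fields, then enable conversion. */
            for (i = 0; i < 12; i++)
                    vop_write_field(vop, win->base,
                                    &win->phy->y2r_coefficients[i], coef[i]);

            vop_write_field(vop, 0, &win->y2r_en, 1);
    }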
+
 static const struct vop_data rk3399_vop_big = {
        .version = VOP_VERSION(3, 5),
        .feature = VOP_FEATURE_OUTPUT_RGB10,
@@ -647,15 +805,22 @@ static const struct vop_data rk3399_vop_big = {
        .misc = &rk3368_misc,
        .win = rk3368_vop_win_data,
        .win_size = ARRAY_SIZE(rk3368_vop_win_data),
+       .win_yuv2yuv = rk3399_vop_big_win_yuv2yuv_data,
 };
 
 static const struct vop_win_data rk3399_vop_lit_win_data[] = {
-       { .base = 0x00, .phy = &rk3288_win01_data,
+       { .base = 0x00, .phy = &rk3368_win01_data,
          .type = DRM_PLANE_TYPE_PRIMARY },
        { .base = 0x00, .phy = &rk3368_win23_data,
          .type = DRM_PLANE_TYPE_CURSOR},
 };
 
+static const struct vop_win_yuv2yuv_data rk3399_vop_lit_win_yuv2yuv_data[] = {
+       { .base = 0x00, .phy = &rk3399_yuv2yuv_win01_data,
+         .y2r_en = VOP_REG(RK3399_YUV2YUV_WIN, 0x1, 1)},
+       { .base = 0x60, .phy = &rk3399_yuv2yuv_win23_data },
+};
+
 static const struct vop_data rk3399_vop_lit = {
        .version = VOP_VERSION(3, 6),
        .intr = &rk3366_vop_intr,
@@ -665,6 +830,7 @@ static const struct vop_data rk3399_vop_lit = {
        .misc = &rk3368_misc,
        .win = rk3399_vop_lit_win_data,
        .win_size = ARRAY_SIZE(rk3399_vop_lit_win_data),
+       .win_yuv2yuv = rk3399_vop_lit_win_yuv2yuv_data,
 };
 
 static const struct vop_win_data rk3228_vop_win_data[] = {
@@ -730,11 +896,11 @@ static const struct vop_intr rk3328_vop_intr = {
 };
 
 static const struct vop_win_data rk3328_vop_win_data[] = {
-       { .base = 0xd0, .phy = &rk3288_win01_data,
+       { .base = 0xd0, .phy = &rk3368_win01_data,
          .type = DRM_PLANE_TYPE_PRIMARY },
-       { .base = 0x1d0, .phy = &rk3288_win01_data,
+       { .base = 0x1d0, .phy = &rk3368_win01_data,
          .type = DRM_PLANE_TYPE_OVERLAY },
-       { .base = 0x2d0, .phy = &rk3288_win01_data,
+       { .base = 0x2d0, .phy = &rk3368_win01_data,
          .type = DRM_PLANE_TYPE_CURSOR },
 };
 
@@ -759,6 +925,8 @@ static const struct of_device_id vop_driver_dt_match[] = {
          .data = &px30_vop_big },
        { .compatible = "rockchip,px30-vop-lit",
          .data = &px30_vop_lit },
+       { .compatible = "rockchip,rk3066-vop",
+         .data = &rk3066_vop },
        { .compatible = "rockchip,rk3188-vop",
          .data = &rk3188_vop },
        { .compatible = "rockchip,rk3288-vop",
index 7348c68352ed96d873230efb6129a0ed7f916243..d837d4a7df4a1106537d46bed7dff72610ba2ce3 100644 (file)
 #define RK3188_REG_CFG_DONE            0x90
 /* rk3188 register definition end */
 
+/* rk3066 register definition */
+#define RK3066_SYS_CTRL0               0x00
+#define RK3066_SYS_CTRL1               0x04
+#define RK3066_DSP_CTRL0               0x08
+#define RK3066_DSP_CTRL1               0x0c
+#define RK3066_INT_STATUS              0x10
+#define RK3066_MCU_CTRL                        0x14
+#define RK3066_BLEND_CTRL              0x18
+#define RK3066_WIN0_COLOR_KEY_CTRL     0x1c
+#define RK3066_WIN1_COLOR_KEY_CTRL     0x20
+#define RK3066_WIN2_COLOR_KEY_CTRL     0x24
+#define RK3066_WIN0_YRGB_MST0          0x28
+#define RK3066_WIN0_CBR_MST0           0x2c
+#define RK3066_WIN0_YRGB_MST1          0x30
+#define RK3066_WIN0_CBR_MST1           0x34
+#define RK3066_WIN0_VIR                        0x38
+#define RK3066_WIN0_ACT_INFO           0x3c
+#define RK3066_WIN0_DSP_INFO           0x40
+#define RK3066_WIN0_DSP_ST             0x44
+#define RK3066_WIN0_SCL_FACTOR_YRGB    0x48
+#define RK3066_WIN0_SCL_FACTOR_CBR     0x4c
+#define RK3066_WIN0_SCL_OFFSET         0x50
+#define RK3066_WIN1_YRGB_MST           0x54
+#define RK3066_WIN1_CBR_MST            0x58
+#define RK3066_WIN1_VIR                        0x5c
+#define RK3066_WIN1_ACT_INFO           0x60
+#define RK3066_WIN1_DSP_INFO           0x64
+#define RK3066_WIN1_DSP_ST             0x68
+#define RK3066_WIN1_SCL_FACTOR_YRGB    0x6c
+#define RK3066_WIN1_SCL_FACTOR_CBR     0x70
+#define RK3066_WIN1_SCL_OFFSET         0x74
+#define RK3066_WIN2_MST                        0x78
+#define RK3066_WIN2_VIR                        0x7c
+#define RK3066_WIN2_DSP_INFO           0x80
+#define RK3066_WIN2_DSP_ST             0x84
+#define RK3066_HWC_MST                 0x88
+#define RK3066_HWC_DSP_ST              0x8c
+#define RK3066_HWC_COLOR_LUT0          0x90
+#define RK3066_HWC_COLOR_LUT1          0x94
+#define RK3066_HWC_COLOR_LUT2          0x98
+#define RK3066_DSP_HTOTAL_HS_END       0x9c
+#define RK3066_DSP_HACT_ST_END         0xa0
+#define RK3066_DSP_VTOTAL_VS_END       0xa4
+#define RK3066_DSP_VACT_ST_END         0xa8
+#define RK3066_DSP_VS_ST_END_F1                0xac
+#define RK3066_DSP_VACT_ST_END_F1      0xb0
+#define RK3066_REG_CFG_DONE            0xc0
+#define RK3066_MCU_BYPASS_WPORT                0x100
+#define RK3066_MCU_BYPASS_RPORT                0x200
+#define RK3066_WIN2_LUT_ADDR           0x400
+#define RK3066_DSP_LUT_ADDR            0x800
+/* rk3066 register definition end */
+
 #endif /* _ROCKCHIP_VOP_REG_H */
index 7559a820bd435362412ecb4eb76f6bc26735ad95..ebb8b7d32b3345dd32ec0ba4b530b21c67d935f3 100644 (file)
@@ -299,6 +299,7 @@ static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv,
        case SAVAGE_PRIM_TRILIST_201:
                reorder = 1;
                prim = SAVAGE_PRIM_TRILIST;
+               /* fall through */
        case SAVAGE_PRIM_TRILIST:
                if (n % 3 != 0) {
                        DRM_ERROR("wrong number of vertices %u in TRILIST\n",
@@ -436,6 +437,7 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
        case SAVAGE_PRIM_TRILIST_201:
                reorder = 1;
                prim = SAVAGE_PRIM_TRILIST;
+               /* fall through */
        case SAVAGE_PRIM_TRILIST:
                if (n % 3 != 0) {
                        DRM_ERROR("wrong number of vertices %u in TRILIST\n",
@@ -557,6 +559,7 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
        case SAVAGE_PRIM_TRILIST_201:
                reorder = 1;
                prim = SAVAGE_PRIM_TRILIST;
+               /* fall through */
        case SAVAGE_PRIM_TRILIST:
                if (n % 3 != 0) {
                        DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
@@ -695,6 +698,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
        case SAVAGE_PRIM_TRILIST_201:
                reorder = 1;
                prim = SAVAGE_PRIM_TRILIST;
+               /* fall through */
        case SAVAGE_PRIM_TRILIST:
                if (n % 3 != 0) {
                        DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
index 499b5fdb869f2064a7c879cbd4066d810ac46622..b6988a6d698ef265f0602045d2d0d1c30aad95c7 100644 (file)
@@ -16,6 +16,7 @@
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "shmob_drm_backlight.h"
 #include "shmob_drm_crtc.h"
index 8554102a6ead189e3b29f380c89867f942b2b73e..cb821adfc3211e57604e4239aaf44c7ecdfef08a 100644 (file)
@@ -18,6 +18,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "shmob_drm_drv.h"
 #include "shmob_drm_kms.h"
@@ -126,7 +127,7 @@ static irqreturn_t shmob_drm_irq(int irq, void *arg)
 DEFINE_DRM_GEM_CMA_FOPS(shmob_drm_fops);
 
 static struct drm_driver shmob_drm_driver = {
-       .driver_features        = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET
+       .driver_features        = DRIVER_GEM | DRIVER_MODESET
                                | DRIVER_PRIME,
        .irq_handler            = shmob_drm_irq,
        .gem_free_object_unlocked = drm_gem_cma_free_object,
@@ -229,8 +230,8 @@ static int shmob_drm_probe(struct platform_device *pdev)
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        sdev->mmio = devm_ioremap_resource(&pdev->dev, res);
-       if (sdev->mmio == NULL)
-               return -ENOMEM;
+       if (IS_ERR(sdev->mmio))
+               return PTR_ERR(sdev->mmio);
 
        ret = shmob_drm_setup_clocks(sdev, pdata->clk_source);
        if (ret < 0)
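The shmob fix above corrects a classic error-handling slip:
devm_ioremap_resource() never returns NULL on failure, it returns an
ERR_PTR()-encoded errno, so a NULL check silently accepts failures and a
later register access faults. The canonical shape is:

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    base = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(base))               /* not: if (!base) */
            return PTR_ERR(base);   /* propagates e.g. -EBUSY or -EINVAL */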
index a17268444c6d3206c2408159410449e506dedee1..2e08bc203bf9437cf618a719aa88de92c3d8dd86 100644 (file)
@@ -13,6 +13,7 @@
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "shmob_drm_crtc.h"
 #include "shmob_drm_drv.h"
index ed76e52eb21333dd3d042bafe938f062c1069e4f..387f0bed6c1cc0aba1eaa9aa1faea0230772c847 100644 (file)
@@ -11,8 +11,8 @@
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "sti_compositor.h"
 #include "sti_crtc.h"
@@ -53,18 +53,10 @@ sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
        struct clk *compo_clk, *pix_clk;
        int rate = mode->clock * 1000;
 
-       DRM_DEBUG_KMS("CRTC:%d (%s) mode:%d (%s)\n",
-                     crtc->base.id, sti_mixer_to_str(mixer),
-                     mode->base.id, mode->name);
-
-       DRM_DEBUG_KMS("%d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
-                     mode->vrefresh, mode->clock,
-                     mode->hdisplay,
-                     mode->hsync_start, mode->hsync_end,
-                     mode->htotal,
-                     mode->vdisplay,
-                     mode->vsync_start, mode->vsync_end,
-                     mode->vtotal, mode->type, mode->flags);
+       DRM_DEBUG_KMS("CRTC:%d (%s) mode: (%s)\n",
+                     crtc->base.id, sti_mixer_to_str(mixer), mode->name);
+
+       DRM_DEBUG_KMS(DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
 
        if (mixer->id == STI_MIXER_MAIN) {
                compo_clk = compo->clk_compo_main;
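The sti change above swaps a hand-rolled timing dump for the core's canonical
formatter, so every driver prints modes identically. DRM_MODE_FMT and
DRM_MODE_ARG() come from drm_modes.h and are used as a pair:

    DRM_DEBUG_KMS("set mode " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));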
index ac54e0f9caea3068ab75bca15cc774cb2f36e4ab..a525fd899f68856a48e93b7130e30a178918c9fb 100644 (file)
 
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
 
 #include "sti_crtc.h"
 #include "sti_drv.h"
index b08376b7611b896fbcb8fd6b3e91fe44dd4b7015..b31cc2672d36989feac457f3289a5a170fd88de5 100644 (file)
@@ -13,8 +13,8 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
 
 #include "sti_awg_utils.h"
 #include "sti_drv.h"
@@ -277,8 +277,8 @@ static void sti_dvo_pre_enable(struct drm_bridge *bridge)
 }
 
 static void sti_dvo_set_mode(struct drm_bridge *bridge,
-                            struct drm_display_mode *mode,
-                            struct drm_display_mode *adjusted_mode)
+                            const struct drm_display_mode *mode,
+                            const struct drm_display_mode *adjusted_mode)
 {
        struct sti_dvo *dvo = bridge->driver_private;
        struct sti_mixer *mixer = to_sti_mixer(dvo->encoder->crtc);
index 19b9b5ed129700730e5c77ea5c37e9edbb0db5e0..ff9256673fc8cae5cc8247ed0022f7ea192e5691 100644 (file)
@@ -12,7 +12,7 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 
 /* HDformatter registers */
 #define HDA_ANA_CFG                     0x0000
@@ -508,8 +508,8 @@ static void sti_hda_pre_enable(struct drm_bridge *bridge)
 }
 
 static void sti_hda_set_mode(struct drm_bridge *bridge,
-               struct drm_display_mode *mode,
-               struct drm_display_mode *adjusted_mode)
+                            const struct drm_display_mode *mode,
+                            const struct drm_display_mode *adjusted_mode)
 {
        struct sti_hda *hda = bridge->driver_private;
        u32 mode_idx;
index ccf718404a1c23e557dd6ff097d779f3bff9729c..6000df62498077609ac4761c70d045d19f6ba590 100644 (file)
@@ -15,8 +15,8 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
+#include <drm/drm_probe_helper.h>
 
 #include <sound/hdmi-codec.h>
 
@@ -434,7 +434,8 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi)
 
        DRM_DEBUG_DRIVER("\n");
 
-       ret = drm_hdmi_avi_infoframe_from_display_mode(&infoframe, mode, false);
+       ret = drm_hdmi_avi_infoframe_from_display_mode(&infoframe,
+                                                      hdmi->drm_connector, mode);
        if (ret < 0) {
                DRM_ERROR("failed to setup AVI infoframe: %d\n", ret);
                return ret;
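This call site (and the sun4i-hdmi one later in the series) follows the same
API change: drm_hdmi_avi_infoframe_from_display_mode() now takes the
connector instead of an is_hdmi2 bool, letting the core derive sink
capabilities from connector state. The 5.1-era prototype, for reference:

    int drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
                                                 struct drm_connector *connector,
                                                 const struct drm_display_mode *mode);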
@@ -917,8 +918,8 @@ static void sti_hdmi_pre_enable(struct drm_bridge *bridge)
 }
 
 static void sti_hdmi_set_mode(struct drm_bridge *bridge,
-               struct drm_display_mode *mode,
-               struct drm_display_mode *adjusted_mode)
+                             const struct drm_display_mode *mode,
+                             const struct drm_display_mode *adjusted_mode)
 {
        struct sti_hdmi *hdmi = bridge->driver_private;
        int ret;
index ea4a3b87fa55c3983e15b6f62daab39dfc1f79f9..c42f2fa7053c2f942f5090b83ec75d869e1d1e0f 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/seq_file.h>
 
 #include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
 
 #include "sti_crtc.h"
 #include "sti_drv.h"
index 8dec001b9d37c4ffde3700c0d49cf874b7face64..0a7f933ab007f1a84373a42a077d9fe51f6ffdcc 100644 (file)
@@ -9,15 +9,19 @@
  */
 
 #include <linux/component.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
 #include <linux/of_platform.h>
 
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_drv.h>
 #include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
 
 #include "ltdc.h"
 
index a514b593f37c531b2a1f7fa3b04d625f64edd8a3..a672b59a22262c949c8519840e9f29c1225d5f83 100644 (file)
@@ -215,7 +215,7 @@ static int dw_mipi_dsi_phy_init(void *priv_data)
 }
 
 static int
-dw_mipi_dsi_get_lane_mbps(void *priv_data, struct drm_display_mode *mode,
+dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
                          unsigned long mode_flags, u32 lanes, u32 format,
                          unsigned int *lane_mbps)
 {
index 61dd661aa0acbde3abfe9481c9a32f4d20c79240..b1741a9d5be24005c3a689044cbcec942707e54c 100644 (file)
 
 #include <linux/clk.h>
 #include <linux/component.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/of_graph.h>
+#include <linux/platform_device.h>
 #include <linux/reset.h>
 
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_device.h>
 #include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_of.h>
-#include <drm/drm_bridge.h>
 #include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
 
 #include <video/videomode.h>
 
@@ -691,7 +698,7 @@ static int ltdc_plane_atomic_check(struct drm_plane *plane,
                                   struct drm_plane_state *state)
 {
        struct drm_framebuffer *fb = state->fb;
-       u32 src_x, src_y, src_w, src_h;
+       u32 src_w, src_h;
 
        DRM_DEBUG_DRIVER("\n");
 
@@ -699,8 +706,6 @@ static int ltdc_plane_atomic_check(struct drm_plane *plane,
                return 0;
 
        /* convert src_ from 16:16 format */
-       src_x = state->src_x >> 16;
-       src_y = state->src_y >> 16;
        src_w = state->src_w >> 16;
        src_h = state->src_h >> 16;
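src_x and src_y were converted but never read, hence their removal; only the
width and height survive. The >> 16 converts the atomic API's 16.16
fixed-point source rectangle to whole pixels:

    /* src_w is Q16.16: 640 pixels arrive as 640 << 16 = 0x02800000 */
    src_w = state->src_w >> 16;     /* 0x02800000 >> 16 == 640 */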
 
index 9e9255ee59cd83138f5572f0d31df50aec9cc244..990847cb40f6588fd11ce41f4ec9b8e15aa9b912 100644 (file)
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include <linux/component.h>
 #include <linux/list.h>
@@ -45,28 +45,6 @@ static const u32 sunxi_rgb2yuv_coef[12] = {
        0x000001c1, 0x00003e88, 0x00003fb8, 0x00000808
 };
 
-/*
- * These coefficients are taken from the A33 BSP from Allwinner.
- *
- * The first three values of each row are coded as 13-bit signed fixed-point
- * numbers, with 10 bits for the fractional part. The fourth value is a
- * constant coded as a 14-bit signed fixed-point number with 4 bits for the
- * fractional part.
- *
- * The values in table order give the following colorspace translation:
- * G = 1.164 * Y - 0.391 * U - 0.813 * V + 135
- * R = 1.164 * Y + 1.596 * V - 222
- * B = 1.164 * Y + 2.018 * U + 276
- *
- * This seems to be a conversion from Y[16:235] UV[16:240] to RGB[0:255],
- * following the BT601 spec.
- */
-static const u32 sunxi_bt601_yuv2rgb_coef[12] = {
-       0x000004a7, 0x00001e6f, 0x00001cbf, 0x00000877,
-       0x000004a7, 0x00000000, 0x00000662, 0x00003211,
-       0x000004a7, 0x00000812, 0x00000000, 0x00002eb1,
-};
-
 static void sun4i_backend_apply_color_correction(struct sunxi_engine *engine)
 {
        int i;
@@ -163,7 +141,6 @@ static const uint32_t sun4i_backend_formats[] = {
        DRM_FORMAT_ARGB1555,
        DRM_FORMAT_ARGB4444,
        DRM_FORMAT_ARGB8888,
-       DRM_FORMAT_BGRX8888,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_RGB888,
        DRM_FORMAT_RGBA4444,
@@ -245,7 +222,8 @@ static int sun4i_backend_update_yuv_format(struct sun4i_backend *backend,
                           SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN);
 
        /* TODO: Add support for the multi-planar YUV formats */
-       if (format->num_planes == 1)
+       if (drm_format_info_is_yuv_packed(format) &&
+           drm_format_info_is_yuv_sampling_422(format))
                val |= SUN4I_BACKEND_IYUVCTL_FBFMT_PACKED_YUV422;
        else
                DRM_DEBUG_DRIVER("Unsupported YUV format (0x%x)\n", fmt);
@@ -1033,6 +1011,10 @@ static const struct of_device_id sun4i_backend_of_table[] = {
                .compatible = "allwinner,sun7i-a20-display-backend",
                .data = &sun7i_backend_quirks,
        },
+       {
+               .compatible = "allwinner,sun8i-a23-display-backend",
+               .data = &sun8i_a33_backend_quirks,
+       },
        {
                .compatible = "allwinner,sun8i-a33-display-backend",
                .data = &sun8i_a33_backend_quirks,
index 3eedf335a935c72b385657691d6c4290b7b9a954..cdb881e34470c60d32363c4fcd6700c12508d506 100644 (file)
@@ -13,8 +13,8 @@
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_modes.h>
+#include <drm/drm_probe_helper.h>
 
 #include <linux/clk-provider.h>
 #include <linux/ioport.h>
index 9e4c375ccc96fac1edfed181f9ffe2de540ce2be..3ebd9f5e2719d7f028c2c87b1e2cedd6c60a5365 100644 (file)
 #include <linux/of_reserved_mem.h>
 
 #include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
 
 #include "sun4i_drv.h"
 #include "sun4i_frontend.h"
@@ -97,6 +97,7 @@ static int sun4i_drv_bind(struct device *dev)
        }
 
        drm_mode_config_init(drm);
+       drm->mode_config.allow_fb_modifiers = true;
 
        ret = component_bind_all(drm->dev, drm);
        if (ret) {
@@ -164,6 +165,7 @@ static bool sun4i_drv_node_is_frontend(struct device_node *node)
                of_device_is_compatible(node, "allwinner,sun5i-a13-display-frontend") ||
                of_device_is_compatible(node, "allwinner,sun6i-a31-display-frontend") ||
                of_device_is_compatible(node, "allwinner,sun7i-a20-display-frontend") ||
+               of_device_is_compatible(node, "allwinner,sun8i-a23-display-frontend") ||
                of_device_is_compatible(node, "allwinner,sun8i-a33-display-frontend") ||
                of_device_is_compatible(node, "allwinner,sun9i-a80-display-frontend");
 }
@@ -403,6 +405,7 @@ static const struct of_device_id sun4i_drv_of_table[] = {
        { .compatible = "allwinner,sun6i-a31-display-engine" },
        { .compatible = "allwinner,sun6i-a31s-display-engine" },
        { .compatible = "allwinner,sun7i-a20-display-engine" },
+       { .compatible = "allwinner,sun8i-a23-display-engine" },
        { .compatible = "allwinner,sun8i-a33-display-engine" },
        { .compatible = "allwinner,sun8i-a83t-display-engine" },
        { .compatible = "allwinner,sun8i-h3-display-engine" },
index 1a7ebc45747ec9d9c498adb6a793625661eff945..346c8071bd383c86388cac8991a04189e80af2af 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/clk.h>
 #include <linux/component.h>
 #include <linux/module.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/regmap.h>
@@ -48,10 +49,38 @@ static const u32 sun4i_frontend_horz_coef[64] = {
        0x03ff0000, 0x0000fd41, 0x01ff0000, 0x0000fe42,
 };
 
+/*
+ * These coefficients are taken from the A33 BSP from Allwinner.
+ *
+ * The first three values of each row are coded as 13-bit signed fixed-point
+ * numbers, with 10 bits for the fractional part. The fourth value is a
+ * constant coded as a 14-bit signed fixed-point number with 4 bits for the
+ * fractional part.
+ *
+ * The values in table order give the following colorspace translation:
+ * G = 1.164 * Y - 0.391 * U - 0.813 * V + 135
+ * R = 1.164 * Y + 1.596 * V - 222
+ * B = 1.164 * Y + 2.018 * U + 276
+ *
+ * This seems to be a conversion from Y[16:235] UV[16:240] to RGB[0:255],
+ * following the BT601 spec.
+ */
+const u32 sunxi_bt601_yuv2rgb_coef[12] = {
+       0x000004a7, 0x00001e6f, 0x00001cbf, 0x00000877,
+       0x000004a7, 0x00000000, 0x00000662, 0x00003211,
+       0x000004a7, 0x00000812, 0x00000000, 0x00002eb1,
+};
+EXPORT_SYMBOL(sunxi_bt601_yuv2rgb_coef);
+
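The fixed-point encoding decodes as the comment claims: 0x4a7 = 1191 and
1191/1024 is about 1.164; 0x1e6f = 7791, i.e. -401 as a 13-bit two's
complement value, and -401/1024 is about -0.391; 0x877 = 2167 and 2167/16 is
about 135. A small host-side sketch of the decode (illustration only, not
kernel code):

    /* Sign-extend a bits-wide field from the packed coefficient word. */
    static int sext(unsigned int v, unsigned int bits)
    {
            return (v & (1u << (bits - 1))) ? (int)v - (1 << bits) : (int)v;
    }

    /* sext(0x1e6f, 13) / 1024.0 == -401 / 1024.0 ~ -0.391 (matrix coeff) */
    /* sext(0x0877, 14) /   16.0 == 2167 / 16.0   ~  135   (offset)       */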
 static void sun4i_frontend_scaler_init(struct sun4i_frontend *frontend)
 {
        int i;
 
+       if (frontend->data->has_coef_access_ctrl)
+               regmap_write_bits(frontend->regs, SUN4I_FRONTEND_FRM_CTRL_REG,
+                                 SUN4I_FRONTEND_FRM_CTRL_COEF_ACCESS_CTRL,
+                                 SUN4I_FRONTEND_FRM_CTRL_COEF_ACCESS_CTRL);
+
        for (i = 0; i < 32; i++) {
                regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_HORZCOEF0_REG(i),
                             sun4i_frontend_horz_coef[2 * i]);
@@ -67,9 +96,11 @@ static void sun4i_frontend_scaler_init(struct sun4i_frontend *frontend)
                             sun4i_frontend_vert_coef[i]);
        }
 
-       regmap_update_bits(frontend->regs, SUN4I_FRONTEND_FRM_CTRL_REG,
-                          SUN4I_FRONTEND_FRM_CTRL_COEF_ACCESS_CTRL,
-                          SUN4I_FRONTEND_FRM_CTRL_COEF_ACCESS_CTRL);
+       if (frontend->data->has_coef_rdy)
+               regmap_write_bits(frontend->regs,
+                                 SUN4I_FRONTEND_FRM_CTRL_REG,
+                                 SUN4I_FRONTEND_FRM_CTRL_COEF_RDY,
+                                 SUN4I_FRONTEND_FRM_CTRL_COEF_RDY);
 }
 
 int sun4i_frontend_init(struct sun4i_frontend *frontend)
@@ -84,59 +115,228 @@ void sun4i_frontend_exit(struct sun4i_frontend *frontend)
 }
 EXPORT_SYMBOL(sun4i_frontend_exit);
 
+static bool sun4i_frontend_format_chroma_requires_swap(uint32_t fmt)
+{
+       switch (fmt) {
+       case DRM_FORMAT_YVU411:
+       case DRM_FORMAT_YVU420:
+       case DRM_FORMAT_YVU422:
+       case DRM_FORMAT_YVU444:
+               return true;
+
+       default:
+               return false;
+       }
+}
+
+static bool sun4i_frontend_format_supports_tiling(uint32_t fmt)
+{
+       switch (fmt) {
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV16:
+       case DRM_FORMAT_NV21:
+       case DRM_FORMAT_NV61:
+       case DRM_FORMAT_YUV411:
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YUV422:
+       case DRM_FORMAT_YVU420:
+       case DRM_FORMAT_YVU422:
+       case DRM_FORMAT_YVU411:
+               return true;
+
+       default:
+               return false;
+       }
+}
+
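The swap helper above exists because the hardware apparently only takes
chroma in U-then-V order: for DRM's YVU formats, plane 1 is Cr and plane 2 is
Cb, so sun4i_frontend_update_buffer() below cross-wires the two chroma
addresses. In effect, for a YVU framebuffer:

    /* DRM plane index -> frontend buffer register, when swap is set:
     *   plane 0 (Y)  -> SUN4I_FRONTEND_BUF_ADDR0_REG
     *   plane 1 (Cr) -> SUN4I_FRONTEND_BUF_ADDR2_REG   (swapped)
     *   plane 2 (Cb) -> SUN4I_FRONTEND_BUF_ADDR1_REG   (swapped)
     */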
 void sun4i_frontend_update_buffer(struct sun4i_frontend *frontend,
                                  struct drm_plane *plane)
 {
        struct drm_plane_state *state = plane->state;
        struct drm_framebuffer *fb = state->fb;
+       unsigned int strides[3] = {};
+
        dma_addr_t paddr;
+       bool swap;
+
+       if (fb->modifier == DRM_FORMAT_MOD_ALLWINNER_TILED) {
+               unsigned int width = state->src_w >> 16;
+               unsigned int offset;
+
+               strides[0] = SUN4I_FRONTEND_LINESTRD_TILED(fb->pitches[0]);
+
+               /*
+                * The X1 offset is the offset of the bottom-right point in
+                * the end tile: the final pixel (at offset width - 1),
+                * masked to its position within the 32-pixel-wide tile.
+                */
+               offset = (width - 1) & (32 - 1);
+
+               regmap_write(frontend->regs, SUN4I_FRONTEND_TB_OFF0_REG,
+                            SUN4I_FRONTEND_TB_OFF_X1(offset));
+
+               if (fb->format->num_planes > 1) {
+                       strides[1] =
+                               SUN4I_FRONTEND_LINESTRD_TILED(fb->pitches[1]);
+
+                       regmap_write(frontend->regs, SUN4I_FRONTEND_TB_OFF1_REG,
+                                    SUN4I_FRONTEND_TB_OFF_X1(offset));
+               }
+
+               if (fb->format->num_planes > 2) {
+                       strides[2] =
+                               SUN4I_FRONTEND_LINESTRD_TILED(fb->pitches[2]);
+
+                       regmap_write(frontend->regs, SUN4I_FRONTEND_TB_OFF2_REG,
+                                    SUN4I_FRONTEND_TB_OFF_X1(offset));
+               }
+       } else {
+               strides[0] = fb->pitches[0];
+
+               if (fb->format->num_planes > 1)
+                       strides[1] = fb->pitches[1];
+
+               if (fb->format->num_planes > 2)
+                       strides[2] = fb->pitches[2];
+       }
 
        /* Set the line width */
        DRM_DEBUG_DRIVER("Frontend stride: %d bytes\n", fb->pitches[0]);
        regmap_write(frontend->regs, SUN4I_FRONTEND_LINESTRD0_REG,
-                    fb->pitches[0]);
+                    strides[0]);
+
+       if (fb->format->num_planes > 1)
+               regmap_write(frontend->regs, SUN4I_FRONTEND_LINESTRD1_REG,
+                            strides[1]);
+
+       if (fb->format->num_planes > 2)
+               regmap_write(frontend->regs, SUN4I_FRONTEND_LINESTRD2_REG,
+                            strides[2]);
+
+       /* Some planar formats require chroma channel swapping by hand. */
+       swap = sun4i_frontend_format_chroma_requires_swap(fb->format->format);
 
        /* Set the physical address of the buffer in memory */
        paddr = drm_fb_cma_get_gem_addr(fb, state, 0);
        paddr -= PHYS_OFFSET;
-       DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);
+       DRM_DEBUG_DRIVER("Setting buffer #0 address to %pad\n", &paddr);
        regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR0_REG, paddr);
+
+       if (fb->format->num_planes > 1) {
+               paddr = drm_fb_cma_get_gem_addr(fb, state, swap ? 2 : 1);
+               paddr -= PHYS_OFFSET;
+               DRM_DEBUG_DRIVER("Setting buffer #1 address to %pad\n", &paddr);
+               regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR1_REG,
+                            paddr);
+       }
+
+       if (fb->format->num_planes > 2) {
+               paddr = drm_fb_cma_get_gem_addr(fb, state, swap ? 1 : 2);
+               paddr -= PHYS_OFFSET;
+               DRM_DEBUG_DRIVER("Setting buffer #2 address to %pad\n", &paddr);
+               regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR2_REG,
+                            paddr);
+       }
 }
 EXPORT_SYMBOL(sun4i_frontend_update_buffer);
 
-static int sun4i_frontend_drm_format_to_input_fmt(uint32_t fmt, u32 *val)
+static int
+sun4i_frontend_drm_format_to_input_fmt(const struct drm_format_info *format,
+                                      u32 *val)
 {
-       switch (fmt) {
-       case DRM_FORMAT_XRGB8888:
+       if (!format->is_yuv)
                *val = SUN4I_FRONTEND_INPUT_FMT_DATA_FMT_RGB;
-               return 0;
-
-       default:
+       else if (drm_format_info_is_yuv_sampling_411(format))
+               *val = SUN4I_FRONTEND_INPUT_FMT_DATA_FMT_YUV411;
+       else if (drm_format_info_is_yuv_sampling_420(format))
+               *val = SUN4I_FRONTEND_INPUT_FMT_DATA_FMT_YUV420;
+       else if (drm_format_info_is_yuv_sampling_422(format))
+               *val = SUN4I_FRONTEND_INPUT_FMT_DATA_FMT_YUV422;
+       else if (drm_format_info_is_yuv_sampling_444(format))
+               *val = SUN4I_FRONTEND_INPUT_FMT_DATA_FMT_YUV444;
+       else
                return -EINVAL;
-       }
+
+       return 0;
 }
 
-static int sun4i_frontend_drm_format_to_input_mode(uint32_t fmt, u32 *val)
+static int
+sun4i_frontend_drm_format_to_input_mode(const struct drm_format_info *format,
+                                       uint64_t modifier, u32 *val)
 {
-       if (drm_format_num_planes(fmt) == 1)
+       bool tiled = (modifier == DRM_FORMAT_MOD_ALLWINNER_TILED);
+
+       switch (format->num_planes) {
+       case 1:
                *val = SUN4I_FRONTEND_INPUT_FMT_DATA_MOD_PACKED;
-       else
-               return -EINVAL;
+               return 0;
 
-       return 0;
+       case 2:
+               *val = tiled ? SUN4I_FRONTEND_INPUT_FMT_DATA_MOD_MB32_SEMIPLANAR
+                            : SUN4I_FRONTEND_INPUT_FMT_DATA_MOD_SEMIPLANAR;
+               return 0;
+
+       case 3:
+               *val = tiled ? SUN4I_FRONTEND_INPUT_FMT_DATA_MOD_MB32_PLANAR
+                            : SUN4I_FRONTEND_INPUT_FMT_DATA_MOD_PLANAR;
+               return 0;
+
+       default:
+               return -EINVAL;
+       }
 }
 
-static int sun4i_frontend_drm_format_to_input_sequence(uint32_t fmt, u32 *val)
+static int
+sun4i_frontend_drm_format_to_input_sequence(const struct drm_format_info *format,
+                                           u32 *val)
 {
-       switch (fmt) {
+       /* Planar formats have an explicit input sequence. */
+       if (drm_format_info_is_yuv_planar(format)) {
+               *val = 0;
+               return 0;
+       }
+
+       switch (format->format) {
        case DRM_FORMAT_BGRX8888:
                *val = SUN4I_FRONTEND_INPUT_FMT_DATA_PS_BGRX;
                return 0;
 
+       case DRM_FORMAT_NV12:
+               *val = SUN4I_FRONTEND_INPUT_FMT_DATA_PS_UV;
+               return 0;
+
+       case DRM_FORMAT_NV16:
+               *val = SUN4I_FRONTEND_INPUT_FMT_DATA_PS_UV;
+               return 0;
+
+       case DRM_FORMAT_NV21:
+               *val = SUN4I_FRONTEND_INPUT_FMT_DATA_PS_VU;
+               return 0;
+
+       case DRM_FORMAT_NV61:
+               *val = SUN4I_FRONTEND_INPUT_FMT_DATA_PS_VU;
+               return 0;
+
+       case DRM_FORMAT_UYVY:
+               *val = SUN4I_FRONTEND_INPUT_FMT_DATA_PS_UYVY;
+               return 0;
+
+       case DRM_FORMAT_VYUY:
+               *val = SUN4I_FRONTEND_INPUT_FMT_DATA_PS_VYUY;
+               return 0;
+
        case DRM_FORMAT_XRGB8888:
                *val = SUN4I_FRONTEND_INPUT_FMT_DATA_PS_XRGB;
                return 0;
 
+       case DRM_FORMAT_YUYV:
+               *val = SUN4I_FRONTEND_INPUT_FMT_DATA_PS_YUYV;
+               return 0;
+
+       case DRM_FORMAT_YVYU:
+               *val = SUN4I_FRONTEND_INPUT_FMT_DATA_PS_YVYU;
+               return 0;
+
        default:
                return -EINVAL;
        }
@@ -160,14 +360,32 @@ static int sun4i_frontend_drm_format_to_output_fmt(uint32_t fmt, u32 *val)
 
 static const uint32_t sun4i_frontend_formats[] = {
        DRM_FORMAT_BGRX8888,
+       DRM_FORMAT_NV12,
+       DRM_FORMAT_NV16,
+       DRM_FORMAT_NV21,
+       DRM_FORMAT_NV61,
+       DRM_FORMAT_UYVY,
+       DRM_FORMAT_VYUY,
        DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_YUV411,
+       DRM_FORMAT_YUV420,
+       DRM_FORMAT_YUV422,
+       DRM_FORMAT_YUV444,
+       DRM_FORMAT_YUYV,
+       DRM_FORMAT_YVU411,
+       DRM_FORMAT_YVU420,
+       DRM_FORMAT_YVU422,
+       DRM_FORMAT_YVU444,
+       DRM_FORMAT_YVYU,
 };
 
 bool sun4i_frontend_format_is_supported(uint32_t fmt, uint64_t modifier)
 {
        unsigned int i;
 
-       if (modifier != DRM_FORMAT_MOD_LINEAR)
+       if (modifier == DRM_FORMAT_MOD_ALLWINNER_TILED)
+               return sun4i_frontend_format_supports_tiling(fmt);
+       else if (modifier != DRM_FORMAT_MOD_LINEAR)
                return false;
 
        for (i = 0; i < ARRAY_SIZE(sun4i_frontend_formats); i++)
@@ -183,9 +401,12 @@ int sun4i_frontend_update_formats(struct sun4i_frontend *frontend,
 {
        struct drm_plane_state *state = plane->state;
        struct drm_framebuffer *fb = state->fb;
-       uint32_t format = fb->format->format;
+       const struct drm_format_info *format = fb->format;
+       uint64_t modifier = fb->modifier;
        u32 out_fmt_val;
        u32 in_fmt_val, in_mod_val, in_ps_val;
+       unsigned int i;
+       u32 bypass;
        int ret;
 
        ret = sun4i_frontend_drm_format_to_input_fmt(format, &in_fmt_val);
@@ -194,7 +415,8 @@ int sun4i_frontend_update_formats(struct sun4i_frontend *frontend,
                return ret;
        }
 
-       ret = sun4i_frontend_drm_format_to_input_mode(format, &in_mod_val);
+       ret = sun4i_frontend_drm_format_to_input_mode(format, modifier,
+                                                     &in_mod_val);
        if (ret) {
                DRM_DEBUG_DRIVER("Invalid input mode\n");
                return ret;
@@ -216,16 +438,39 @@ int sun4i_frontend_update_formats(struct sun4i_frontend *frontend,
         * I have no idea what this does exactly, but it seems to be
         * related to the scaler FIR filter phase parameters.
         */
-       regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_HORZPHASE_REG, 0x400);
-       regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_HORZPHASE_REG, 0x400);
-       regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTPHASE0_REG, 0x400);
-       regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTPHASE0_REG, 0x400);
-       regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTPHASE1_REG, 0x400);
-       regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTPHASE1_REG, 0x400);
+       regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_HORZPHASE_REG,
+                    frontend->data->ch_phase[0].horzphase);
+       regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_HORZPHASE_REG,
+                    frontend->data->ch_phase[1].horzphase);
+       regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTPHASE0_REG,
+                    frontend->data->ch_phase[0].vertphase[0]);
+       regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTPHASE0_REG,
+                    frontend->data->ch_phase[1].vertphase[0]);
+       regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTPHASE1_REG,
+                    frontend->data->ch_phase[0].vertphase[1]);
+       regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTPHASE1_REG,
+                    frontend->data->ch_phase[1].vertphase[1]);
+
+       /*
+        * Checking the input format is sufficient since we currently only
+        * support RGB output formats to the backend. If YUV output formats
+        * ever get supported, a YUV input and output would require bypassing
+        * the CSC engine too.
+        */
+       if (format->is_yuv) {
+               /* Setup the CSC engine for YUV to RGB conversion. */
+               bypass = 0;
+
+               for (i = 0; i < ARRAY_SIZE(sunxi_bt601_yuv2rgb_coef); i++)
+                       regmap_write(frontend->regs,
+                                    SUN4I_FRONTEND_CSC_COEF_REG(i),
+                                    sunxi_bt601_yuv2rgb_coef[i]);
+       } else {
+               bypass = SUN4I_FRONTEND_BYPASS_CSC_EN;
+       }
 
        regmap_update_bits(frontend->regs, SUN4I_FRONTEND_BYPASS_REG,
-                          SUN4I_FRONTEND_BYPASS_CSC_EN,
-                          SUN4I_FRONTEND_BYPASS_CSC_EN);
+                          SUN4I_FRONTEND_BYPASS_CSC_EN, bypass);
 
        regmap_write(frontend->regs, SUN4I_FRONTEND_INPUT_FMT_REG,
                     in_mod_val | in_fmt_val | in_ps_val);
@@ -321,6 +566,10 @@ static int sun4i_frontend_bind(struct device *dev, struct device *master,
        frontend->dev = dev;
        frontend->node = dev->of_node;
 
+       frontend->data = of_device_get_match_data(dev);
+       if (!frontend->data)
+               return -ENODEV;
+
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        regs = devm_ioremap_resource(dev, res);
        if (IS_ERR(regs))
@@ -433,8 +682,51 @@ static const struct dev_pm_ops sun4i_frontend_pm_ops = {
        .runtime_suspend        = sun4i_frontend_runtime_suspend,
 };
 
+static const struct sun4i_frontend_data sun4i_a10_frontend = {
+       .ch_phase               = {
+               {
+                       .horzphase = 0,
+                       .vertphase = { 0, 0 },
+               },
+               {
+                       .horzphase = 0xfc000,
+                       .vertphase = { 0xfc000, 0xfc000 },
+               },
+       },
+       .has_coef_rdy           = true,
+};
+
+static const struct sun4i_frontend_data sun8i_a33_frontend = {
+       .ch_phase               = {
+               {
+                       .horzphase = 0x400,
+                       .vertphase = { 0x400, 0x400 },
+               },
+               {
+                       .horzphase = 0x400,
+                       .vertphase = { 0x400, 0x400 },
+               },
+       },
+       .has_coef_access_ctrl   = true,
+};
+
 const struct of_device_id sun4i_frontend_of_table[] = {
-       { .compatible = "allwinner,sun8i-a33-display-frontend" },
+       {
+               .compatible = "allwinner,sun4i-a10-display-frontend",
+               .data = &sun4i_a10_frontend
+       },
+       {
+               .compatible = "allwinner,sun7i-a20-display-frontend",
+               .data = &sun4i_a10_frontend
+       },
+       {
+               .compatible = "allwinner,sun8i-a23-display-frontend",
+               .data = &sun8i_a33_frontend
+       },
+       {
+               .compatible = "allwinner,sun8i-a33-display-frontend",
+               .data = &sun8i_a33_frontend
+       },
        { }
 };
 EXPORT_SYMBOL(sun4i_frontend_of_table);
index ad146e8d8d70d1d05e82420cd45bb0f7e4257bef..0c382c1ddb0fe81b0eb8f63999d0f6e2856d650c 100644 (file)
 #define SUN4I_FRONTEND_BYPASS_CSC_EN                   BIT(1)
 
 #define SUN4I_FRONTEND_BUF_ADDR0_REG           0x020
+#define SUN4I_FRONTEND_BUF_ADDR1_REG           0x024
+#define SUN4I_FRONTEND_BUF_ADDR2_REG           0x028
+
+#define SUN4I_FRONTEND_TB_OFF0_REG             0x030
+#define SUN4I_FRONTEND_TB_OFF1_REG             0x034
+#define SUN4I_FRONTEND_TB_OFF2_REG             0x038
+#define SUN4I_FRONTEND_TB_OFF_X1(x1)                   ((x1) << 16)
+#define SUN4I_FRONTEND_TB_OFF_Y0(y0)                   ((y0) << 8)
+#define SUN4I_FRONTEND_TB_OFF_X0(x0)                   (x0)
 
 #define SUN4I_FRONTEND_LINESTRD0_REG           0x040
+#define SUN4I_FRONTEND_LINESTRD1_REG           0x044
+#define SUN4I_FRONTEND_LINESTRD2_REG           0x048
+
+/*
+ * In tiled mode, the stride is defined as the distance between the start of the
+ * end line of the current tile and the start of the first line in the next
+ * vertical tile.
+ *
+ * Tiles are represented in row-major order, so the end line of the current
+ * tile starts at 31 * 32 (31 lines of 32 cols), the next vertical tile
+ * starts at 32-bit-aligned-width * 32, and the distance is:
+ * 32 * (32-bit-aligned-width - 31).
+ */
+#define SUN4I_FRONTEND_LINESTRD_TILED(stride)          (((stride) - 31) * 32)
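A quick sanity check of the macro: for a tiled luma plane with a 128-byte
pitch, SUN4I_FRONTEND_LINESTRD_TILED(128) yields (128 - 31) * 32 = 3104,
matching the 32 * (32-bit-aligned-width - 31) formula in the comment.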
 
 #define SUN4I_FRONTEND_INPUT_FMT_REG           0x04c
+#define SUN4I_FRONTEND_INPUT_FMT_DATA_MOD_PLANAR       (0 << 8)
 #define SUN4I_FRONTEND_INPUT_FMT_DATA_MOD_PACKED       (1 << 8)
+#define SUN4I_FRONTEND_INPUT_FMT_DATA_MOD_SEMIPLANAR   (2 << 8)
+#define SUN4I_FRONTEND_INPUT_FMT_DATA_MOD_MB32_PLANAR  (4 << 8)
+#define SUN4I_FRONTEND_INPUT_FMT_DATA_MOD_MB32_SEMIPLANAR (6 << 8)
+#define SUN4I_FRONTEND_INPUT_FMT_DATA_FMT_YUV444       (0 << 4)
+#define SUN4I_FRONTEND_INPUT_FMT_DATA_FMT_YUV422       (1 << 4)
+#define SUN4I_FRONTEND_INPUT_FMT_DATA_FMT_YUV420       (2 << 4)
+#define SUN4I_FRONTEND_INPUT_FMT_DATA_FMT_YUV411       (3 << 4)
 #define SUN4I_FRONTEND_INPUT_FMT_DATA_FMT_RGB          (5 << 4)
+#define SUN4I_FRONTEND_INPUT_FMT_DATA_PS_UYVY          0
+#define SUN4I_FRONTEND_INPUT_FMT_DATA_PS_YUYV          1
+#define SUN4I_FRONTEND_INPUT_FMT_DATA_PS_VYUY          2
+#define SUN4I_FRONTEND_INPUT_FMT_DATA_PS_YVYU          3
+#define SUN4I_FRONTEND_INPUT_FMT_DATA_PS_UV            0
+#define SUN4I_FRONTEND_INPUT_FMT_DATA_PS_VU            1
 #define SUN4I_FRONTEND_INPUT_FMT_DATA_PS_BGRX          0
 #define SUN4I_FRONTEND_INPUT_FMT_DATA_PS_XRGB          1
 
@@ -35,6 +72,8 @@
 #define SUN4I_FRONTEND_OUTPUT_FMT_DATA_FMT_BGRX8888    1
 #define SUN4I_FRONTEND_OUTPUT_FMT_DATA_FMT_XRGB8888    2
 
+#define SUN4I_FRONTEND_CSC_COEF_REG(c)         (0x070 + (0x4 * (c)))
+
 #define SUN4I_FRONTEND_CH0_INSIZE_REG          0x100
 #define SUN4I_FRONTEND_INSIZE(h, w)                    ((((h) - 1) << 16) | (((w) - 1)))
 
@@ -73,6 +112,16 @@ struct drm_plane;
 struct regmap;
 struct reset_control;
 
+struct sun4i_frontend_data {
+       bool    has_coef_access_ctrl;
+       bool    has_coef_rdy;
+
+       struct {
+               u32     horzphase;
+               u32     vertphase[2];
+       } ch_phase[2];
+};
+
 struct sun4i_frontend {
        struct list_head        list;
        struct device           *dev;
@@ -83,9 +132,12 @@ struct sun4i_frontend {
        struct clk              *ram_clk;
        struct regmap           *regs;
        struct reset_control    *reset;
+
+       const struct sun4i_frontend_data        *data;
 };
 
 extern const struct of_device_id sun4i_frontend_of_table[];
+extern const u32 sunxi_bt601_yuv2rgb_coef[12];
 
 int sun4i_frontend_init(struct sun4i_frontend *frontend);
 void sun4i_frontend_exit(struct sun4i_frontend *frontend);
index 061d2e0d9011ee88991b3f0fb1b4e2dd54925bee..d95c6e224bd9a05a55d5b823631838605b8a82bd 100644 (file)
@@ -11,7 +11,7 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_of.h>
@@ -52,7 +52,8 @@ static int sun4i_hdmi_setup_avi_infoframes(struct sun4i_hdmi *hdmi,
        u8 buffer[17];
        int i, ret;
 
-       ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
+       ret = drm_hdmi_avi_infoframe_from_display_mode(&frame,
+                                                      &hdmi->connector, mode);
        if (ret < 0) {
                DRM_ERROR("Failed to get infoframes from mode\n");
                return ret;
index 29631e0efde37ce709023995634dceac8c412f6a..a514fe88d4416ad42418527a35df6a5f05a253f1 100644 (file)
@@ -114,6 +114,18 @@ static void sun4i_backend_layer_atomic_update(struct drm_plane *plane,
        sun4i_backend_layer_enable(backend, layer->id, true);
 }
 
+static bool sun4i_layer_format_mod_supported(struct drm_plane *plane,
+                                            uint32_t format, uint64_t modifier)
+{
+       struct sun4i_layer *layer = plane_to_sun4i_layer(plane);
+
+       if (IS_ERR_OR_NULL(layer->backend->frontend))
+               return sun4i_backend_format_is_supported(format, modifier);
+
+       return sun4i_backend_format_is_supported(format, modifier) ||
+              sun4i_frontend_format_is_supported(format, modifier);
+}
+
 static const struct drm_plane_helper_funcs sun4i_backend_layer_helper_funcs = {
        .prepare_fb     = drm_gem_fb_prepare_fb,
        .atomic_disable = sun4i_backend_layer_atomic_disable,
@@ -127,6 +139,7 @@ static const struct drm_plane_funcs sun4i_backend_layer_funcs = {
        .disable_plane          = drm_atomic_helper_disable_plane,
        .reset                  = sun4i_backend_layer_reset,
        .update_plane           = drm_atomic_helper_update_plane,
+       .format_mod_supported   = sun4i_layer_format_mod_supported,
 };
 
 static const uint32_t sun4i_layer_formats[] = {
@@ -138,17 +151,53 @@ static const uint32_t sun4i_layer_formats[] = {
        DRM_FORMAT_RGBA4444,
        DRM_FORMAT_RGB888,
        DRM_FORMAT_RGB565,
+       DRM_FORMAT_NV12,
+       DRM_FORMAT_NV16,
+       DRM_FORMAT_NV21,
+       DRM_FORMAT_NV61,
        DRM_FORMAT_UYVY,
        DRM_FORMAT_VYUY,
        DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_YUV411,
+       DRM_FORMAT_YUV420,
+       DRM_FORMAT_YUV422,
+       DRM_FORMAT_YUV444,
        DRM_FORMAT_YUYV,
+       DRM_FORMAT_YVU411,
+       DRM_FORMAT_YVU420,
+       DRM_FORMAT_YVU422,
+       DRM_FORMAT_YVU444,
        DRM_FORMAT_YVYU,
 };
 
+static const uint32_t sun4i_backend_layer_formats[] = {
+       DRM_FORMAT_ARGB8888,
+       DRM_FORMAT_ARGB4444,
+       DRM_FORMAT_ARGB1555,
+       DRM_FORMAT_RGBA5551,
+       DRM_FORMAT_RGBA4444,
+       DRM_FORMAT_RGB888,
+       DRM_FORMAT_RGB565,
+       DRM_FORMAT_UYVY,
+       DRM_FORMAT_VYUY,
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_YUYV,
+       DRM_FORMAT_YVYU,
+};
+
+static const uint64_t sun4i_layer_modifiers[] = {
+       DRM_FORMAT_MOD_LINEAR,
+       DRM_FORMAT_MOD_ALLWINNER_TILED,
+       DRM_FORMAT_MOD_INVALID
+};
+
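Two details in the plane setup below: a modifier list handed to
drm_universal_plane_init() must end with the DRM_FORMAT_MOD_INVALID sentinel,
and passing a non-NULL list (with allow_fb_modifiers set, as done in
sun4i_drv_bind() earlier in this series) is what makes the core advertise the
IN_FORMATS blob and consult .format_mod_supported. Reduced shape:

    ret = drm_universal_plane_init(drm, &layer->plane, 0, &funcs,
                                   formats, formats_len,
                                   modifiers,   /* NULL: linear-only, no blob */
                                   type, NULL);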
 static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm,
                                                struct sun4i_backend *backend,
                                                enum drm_plane_type type)
 {
+       const uint64_t *modifiers = sun4i_layer_modifiers;
+       const uint32_t *formats = sun4i_layer_formats;
+       unsigned int formats_len = ARRAY_SIZE(sun4i_layer_formats);
        struct sun4i_layer *layer;
        int ret;
 
@@ -156,12 +205,19 @@ static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm,
        if (!layer)
                return ERR_PTR(-ENOMEM);
 
+       layer->backend = backend;
+
+       if (IS_ERR_OR_NULL(backend->frontend)) {
+               formats = sun4i_backend_layer_formats;
+               formats_len = ARRAY_SIZE(sun4i_backend_layer_formats);
+               modifiers = NULL;
+       }
+
        /* possible crtcs are set later */
        ret = drm_universal_plane_init(drm, &layer->plane, 0,
                                       &sun4i_backend_layer_funcs,
-                                      sun4i_layer_formats,
-                                      ARRAY_SIZE(sun4i_layer_formats),
-                                      NULL, type, NULL);
+                                      formats, formats_len,
+                                      modifiers, type, NULL);
        if (ret) {
                dev_err(drm->dev, "Couldn't initialize layer\n");
                return ERR_PTR(ret);
@@ -169,7 +225,6 @@ static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm,
 
        drm_plane_helper_add(&layer->plane,
                             &sun4i_backend_layer_helper_funcs);
-       layer->backend = backend;
 
        drm_plane_create_alpha_property(&layer->plane);
        drm_plane_create_zpos_property(&layer->plane, 0, 0,
index e7eb0d1e17be5e6575550f9e6c12d9b96f92a0d5..147b97ed1a091e0d257b7a8f212ad515e1a3ca0f 100644 (file)
@@ -8,9 +8,9 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
 
 #include "sun4i_crtc.h"
 #include "sun4i_tcon.h"
index f4a22689eb54c238f96626c03d8271d70ce645b8..cae19e7bbeaa4455c35c6c0ccd591717bfe1d4aa 100644 (file)
@@ -14,9 +14,9 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
 
 #include "sun4i_crtc.h"
 #include "sun4i_tcon.h"
index 0420f5c978b9d641926150f0d7987331b9fdb55d..2bd2eda6480aeb5a343a01e1b78e6b1c07cf9920 100644 (file)
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_connector.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_modes.h>
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
 
 #include <uapi/drm/drm_mode.h>
 
@@ -1494,6 +1494,7 @@ const struct of_device_id sun4i_tcon_of_table[] = {
        { .compatible = "allwinner,sun6i-a31-tcon", .data = &sun6i_a31_quirks },
        { .compatible = "allwinner,sun6i-a31s-tcon", .data = &sun6i_a31s_quirks },
        { .compatible = "allwinner,sun7i-a20-tcon", .data = &sun7i_a20_quirks },
+       { .compatible = "allwinner,sun8i-a23-tcon", .data = &sun8i_a33_quirks },
        { .compatible = "allwinner,sun8i-a33-tcon", .data = &sun8i_a33_quirks },
        { .compatible = "allwinner,sun8i-a83t-tcon-lcd", .data = &sun8i_a83t_lcd_quirks },
        { .compatible = "allwinner,sun8i-a83t-tcon-tv", .data = &sun8i_a83t_tv_quirks },
index 1a838d2082110679324824e4a235b45691e5556a..e8700a362064887f0d72d89191440a559bd43856 100644 (file)
@@ -18,9 +18,9 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
 
 #include "sun4i_crtc.h"
 #include "sun4i_drv.h"
index 88eb268fdf7328ce40a166c96a22a10efff5c815..442094a4af7a90eb64e8246f19c26e3a33902569 100644 (file)
@@ -101,6 +101,7 @@ static int sun6i_drc_remove(struct platform_device *pdev)
 static const struct of_device_id sun6i_drc_of_table[] = {
        { .compatible = "allwinner,sun6i-a31-drc" },
        { .compatible = "allwinner,sun6i-a31s-drc" },
+       { .compatible = "allwinner,sun8i-a23-drc" },
        { .compatible = "allwinner,sun8i-a33-drc" },
        { .compatible = "allwinner,sun9i-a80-drc" },
        { }
index e3b34a3455460fbab688148f6d19c2ed0607f4f4..1ebe56817fa988537bb2d5edc1c70887fb531183 100644 (file)
@@ -19,9 +19,9 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_mipi_dsi.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
 
 #include "sun4i_drv.h"
 #include "sun6i_mipi_dsi.h"
index 44a9ba7d843336b703f5bded9af69a3e31a19c33..30a2eff55687b0ddc6e18bc006714fb67ecff84a 100644 (file)
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include <linux/component.h>
 #include <linux/dma-mapping.h>
index 18534263a05d827fe4e87b5e65355f98094ffb46..a342ec8b131e568c6066ab1ca789ba69c62a01c0 100644 (file)
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drmP.h>
 
 #include "sun8i_ui_layer.h"
index 87be898f9b7a090cf732e9c9b2187e1986031aaf..8a0616238467ae898aada2b17d7e3ae36e62b9fb 100644 (file)
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drmP.h>
 
 #include "sun8i_vi_layer.h"
index 2e0d6213f6bcd3a79b8ba4e411f92dd9d606b424..33c463e8d49f9adeeac9688f5cca8073a9a9aa77 100644 (file)
@@ -10,6 +10,7 @@ tegra-drm-y := \
        dc.o \
        output.o \
        rgb.o \
+       hda.o \
        hdmi.o \
        mipi-phy.o \
        dsi.o \
index 4b70ce664c4185e36f8e173dfd43353eee404fa5..0c5f1e6a04467e7a300700afb776a71019cca09d 100644 (file)
@@ -92,10 +92,6 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
                return -ENOMEM;
 
        if (iommu_present(&platform_bus_type)) {
-               u64 carveout_start, carveout_end, gem_start, gem_end;
-               struct iommu_domain_geometry *geometry;
-               unsigned long order;
-
                tegra->domain = iommu_domain_alloc(&platform_bus_type);
                if (!tegra->domain) {
                        err = -ENOMEM;
@@ -105,27 +101,6 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
                err = iova_cache_get();
                if (err < 0)
                        goto domain;
-
-               geometry = &tegra->domain->geometry;
-               gem_start = geometry->aperture_start;
-               gem_end = geometry->aperture_end - CARVEOUT_SZ;
-               carveout_start = gem_end + 1;
-               carveout_end = geometry->aperture_end;
-
-               order = __ffs(tegra->domain->pgsize_bitmap);
-               init_iova_domain(&tegra->carveout.domain, 1UL << order,
-                                carveout_start >> order);
-
-               tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
-               tegra->carveout.limit = carveout_end >> tegra->carveout.shift;
-
-               drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
-               mutex_init(&tegra->mm_lock);
-
-               DRM_DEBUG("IOMMU apertures:\n");
-               DRM_DEBUG("  GEM: %#llx-%#llx\n", gem_start, gem_end);
-               DRM_DEBUG("  Carveout: %#llx-%#llx\n", carveout_start,
-                         carveout_end);
        }
 
        mutex_init(&tegra->clients_lock);
@@ -159,6 +134,36 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
        if (err < 0)
                goto fbdev;
 
+       if (tegra->domain) {
+               u64 carveout_start, carveout_end, gem_start, gem_end;
+               u64 dma_mask = dma_get_mask(&device->dev);
+               dma_addr_t start, end;
+               unsigned long order;
+
+               start = tegra->domain->geometry.aperture_start & dma_mask;
+               end = tegra->domain->geometry.aperture_end & dma_mask;
+
+               gem_start = start;
+               gem_end = end - CARVEOUT_SZ;
+               carveout_start = gem_end + 1;
+               carveout_end = end;
+
+               order = __ffs(tegra->domain->pgsize_bitmap);
+               init_iova_domain(&tegra->carveout.domain, 1UL << order,
+                                carveout_start >> order);
+
+               tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
+               tegra->carveout.limit = carveout_end >> tegra->carveout.shift;
+
+               drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
+               mutex_init(&tegra->mm_lock);
+
+               DRM_DEBUG("IOMMU apertures:\n");
+               DRM_DEBUG("  GEM: %#llx-%#llx\n", gem_start, gem_end);
+               DRM_DEBUG("  Carveout: %#llx-%#llx\n", carveout_start,
+                         carveout_end);
+       }
+
        if (tegra->hub) {
                err = tegra_display_hub_prepare(tegra->hub);
                if (err < 0)
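
Moving the carveout/GEM setup after host1x_device_init() lets it run once the host1x device's DMA mask is known, clipping the IOMMU aperture so the address windows never exceed what the device can actually address. A worked example of the masking (values purely illustrative):

        /*
         * A 34-bit aperture behind a 32-bit DMA mask:
         *   aperture_end = 0x3ffffffff, dma_mask = 0xffffffff
         *   end = aperture_end & dma_mask = 0xffffffff
         * so both the GEM range and the CARVEOUT_SZ-sized carveout
         * taken from its top end stay within the first 4 GiB.
         */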
@@ -1041,6 +1046,7 @@ int tegra_drm_register_client(struct tegra_drm *tegra,
 {
        mutex_lock(&tegra->clients_lock);
        list_add_tail(&client->list, &tegra->clients);
+       client->drm = tegra;
        mutex_unlock(&tegra->clients_lock);
 
        return 0;
@@ -1051,6 +1057,7 @@ int tegra_drm_unregister_client(struct tegra_drm *tegra,
 {
        mutex_lock(&tegra->clients_lock);
        list_del_init(&client->list);
+       client->drm = NULL;
        mutex_unlock(&tegra->clients_lock);
 
        return 0;
index 1012335bb4892ff0c9db6896ad72888b07852c24..70154c253d4566b47cba2c30b72dba1a4bf7a687 100644 (file)
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_fixed.h>
+#include <drm/drm_probe_helper.h>
 
 #include "gem.h"
 #include "hub.h"
@@ -88,6 +88,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
 struct tegra_drm_client {
        struct host1x_client base;
        struct list_head list;
+       struct tegra_drm *drm;
 
        unsigned int version;
        const struct tegra_drm_client_ops *ops;
@@ -124,7 +125,7 @@ struct tegra_output {
        struct drm_panel *panel;
        struct i2c_adapter *ddc;
        const struct edid *edid;
-       struct cec_notifier *notifier;
+       struct cec_notifier *cec;
        unsigned int hpd_irq;
        int hpd_gpio;
        enum of_gpio_flags hpd_gpio_flags;
index b947e82bbeb1c287c60c24ee86a3f59936d2c821..0a4ce05e00ab9c9126010d1d1e8f80a0a97fbb6d 100644 (file)
@@ -15,6 +15,7 @@
 #include "drm.h"
 #include "gem.h"
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_modeset_helper.h>
 
 #ifdef CONFIG_DRM_FBDEV_EMULATION
 static inline struct tegra_fbdev *to_tegra_fbdev(struct drm_fb_helper *helper)
@@ -255,7 +256,6 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
        helper->fbdev = info;
 
        info->par = helper;
-       info->flags = FBINFO_FLAG_DEFAULT;
        info->fbops = &tegra_fb_ops;
 
        drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
diff --git a/drivers/gpu/drm/tegra/hda.c b/drivers/gpu/drm/tegra/hda.c
new file mode 100644 (file)
index 0000000..94245a1
--- /dev/null
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2019 NVIDIA Corporation
+ */
+
+#include <linux/bug.h>
+
+#include <sound/hda_verbs.h>
+
+#include "hda.h"
+
+void tegra_hda_parse_format(unsigned int format, struct tegra_hda_format *fmt)
+{
+       unsigned int mul, div, bits, channels;
+
+       if (format & AC_FMT_TYPE_NON_PCM)
+               fmt->pcm = false;
+       else
+               fmt->pcm = true;
+
+       if (format & AC_FMT_BASE_44K)
+               fmt->sample_rate = 44100;
+       else
+               fmt->sample_rate = 48000;
+
+       mul = (format & AC_FMT_MULT_MASK) >> AC_FMT_MULT_SHIFT;
+       div = (format & AC_FMT_DIV_MASK) >> AC_FMT_DIV_SHIFT;
+
+       fmt->sample_rate = fmt->sample_rate * (mul + 1) / (div + 1);
+
+       switch (format & AC_FMT_BITS_MASK) {
+       case AC_FMT_BITS_8:
+               fmt->bits = 8;
+               break;
+
+       case AC_FMT_BITS_16:
+               fmt->bits = 16;
+               break;
+
+       case AC_FMT_BITS_20:
+               fmt->bits = 20;
+               break;
+
+       case AC_FMT_BITS_24:
+               fmt->bits = 24;
+               break;
+
+       case AC_FMT_BITS_32:
+               fmt->bits = 32;
+               break;
+
+       default:
+               bits = (format & AC_FMT_BITS_MASK) >> AC_FMT_BITS_SHIFT;
+               WARN(1, "invalid number of bits: %#x\n", bits);
+               fmt->bits = 8;
+               break;
+       }
+
+       channels = (format & AC_FMT_CHAN_MASK) >> AC_FMT_CHAN_SHIFT;
+
+       /* channels are encoded as n - 1 */
+       fmt->channels = channels + 1;
+}
diff --git a/drivers/gpu/drm/tegra/hda.h b/drivers/gpu/drm/tegra/hda.h
new file mode 100644 (file)
index 0000000..7726995
--- /dev/null
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2019 NVIDIA Corporation
+ */
+
+#ifndef DRM_TEGRA_HDA_H
+#define DRM_TEGRA_HDA_H 1
+
+#include <linux/types.h>
+
+struct tegra_hda_format {
+       unsigned int sample_rate;
+       unsigned int channels;
+       unsigned int bits;
+       bool pcm;
+};
+
+void tegra_hda_parse_format(unsigned int format, struct tegra_hda_format *fmt);
+
+#endif
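
The two new files factor the HDA format-word decoding out of the HDMI driver so the SOR driver can share it. A minimal usage sketch against the header above (the caller is hypothetical; in the drivers the format word is read from the codec scratch register):

        #include "hda.h"

        static void example_handle_codec_format(unsigned int format)
        {
                struct tegra_hda_format fmt;

                /* decode sample rate, bit depth, channel count and PCM flag */
                tegra_hda_parse_format(format, &fmt);

                /*
                 * A 2-channel, 48 kHz, 16-bit PCM stream (base 48K, no
                 * mult/div, BITS_16, CHAN = 1) decodes to:
                 *   fmt.sample_rate == 48000, fmt.bits == 16,
                 *   fmt.channels == 2, fmt.pcm == true
                 */
        }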
index 0082468f703c8c12ec89b142142b61b84b22fe89..47c55974756d576b71193219b92d976078006b4e 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/debugfs.h>
 #include <linux/gpio.h>
 #include <linux/hdmi.h>
+#include <linux/math64.h>
 #include <linux/of_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/regulator/consumer.h>
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-
-#include <sound/hda_verbs.h>
-
-#include <media/cec-notifier.h>
+#include <drm/drm_probe_helper.h>
 
+#include "hda.h"
 #include "hdmi.h"
 #include "drm.h"
 #include "dc.h"
@@ -71,8 +69,7 @@ struct tegra_hdmi {
        const struct tegra_hdmi_config *config;
 
        unsigned int audio_source;
-       unsigned int audio_sample_rate;
-       unsigned int audio_channels;
+       struct tegra_hda_format format;
 
        unsigned int pixel_clock;
        bool stereo;
@@ -119,68 +116,11 @@ static inline void tegra_hdmi_writel(struct tegra_hdmi *hdmi, u32 value,
 }
 
 struct tegra_hdmi_audio_config {
-       unsigned int pclk;
        unsigned int n;
        unsigned int cts;
        unsigned int aval;
 };
 
-static const struct tegra_hdmi_audio_config tegra_hdmi_audio_32k[] = {
-       {  25200000, 4096,  25200, 24000 },
-       {  27000000, 4096,  27000, 24000 },
-       {  74250000, 4096,  74250, 24000 },
-       { 148500000, 4096, 148500, 24000 },
-       {         0,    0,      0,     0 },
-};
-
-static const struct tegra_hdmi_audio_config tegra_hdmi_audio_44_1k[] = {
-       {  25200000, 5880,  26250, 25000 },
-       {  27000000, 5880,  28125, 25000 },
-       {  74250000, 4704,  61875, 20000 },
-       { 148500000, 4704, 123750, 20000 },
-       {         0,    0,      0,     0 },
-};
-
-static const struct tegra_hdmi_audio_config tegra_hdmi_audio_48k[] = {
-       {  25200000, 6144,  25200, 24000 },
-       {  27000000, 6144,  27000, 24000 },
-       {  74250000, 6144,  74250, 24000 },
-       { 148500000, 6144, 148500, 24000 },
-       {         0,    0,      0,     0 },
-};
-
-static const struct tegra_hdmi_audio_config tegra_hdmi_audio_88_2k[] = {
-       {  25200000, 11760,  26250, 25000 },
-       {  27000000, 11760,  28125, 25000 },
-       {  74250000,  9408,  61875, 20000 },
-       { 148500000,  9408, 123750, 20000 },
-       {         0,     0,      0,     0 },
-};
-
-static const struct tegra_hdmi_audio_config tegra_hdmi_audio_96k[] = {
-       {  25200000, 12288,  25200, 24000 },
-       {  27000000, 12288,  27000, 24000 },
-       {  74250000, 12288,  74250, 24000 },
-       { 148500000, 12288, 148500, 24000 },
-       {         0,     0,      0,     0 },
-};
-
-static const struct tegra_hdmi_audio_config tegra_hdmi_audio_176_4k[] = {
-       {  25200000, 23520,  26250, 25000 },
-       {  27000000, 23520,  28125, 25000 },
-       {  74250000, 18816,  61875, 20000 },
-       { 148500000, 18816, 123750, 20000 },
-       {         0,     0,      0,     0 },
-};
-
-static const struct tegra_hdmi_audio_config tegra_hdmi_audio_192k[] = {
-       {  25200000, 24576,  25200, 24000 },
-       {  27000000, 24576,  27000, 24000 },
-       {  74250000, 24576,  74250, 24000 },
-       { 148500000, 24576, 148500, 24000 },
-       {         0,     0,      0,     0 },
-};
-
 static const struct tmds_config tegra20_tmds_config[] = {
        { /* slow pixel clock modes */
                .pclk = 27000000,
@@ -418,52 +358,53 @@ static const struct tmds_config tegra124_tmds_config[] = {
        },
 };
 
-static const struct tegra_hdmi_audio_config *
-tegra_hdmi_get_audio_config(unsigned int sample_rate, unsigned int pclk)
+static int
+tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pix_clock,
+                           struct tegra_hdmi_audio_config *config)
 {
-       const struct tegra_hdmi_audio_config *table;
-
-       switch (sample_rate) {
-       case 32000:
-               table = tegra_hdmi_audio_32k;
-               break;
-
-       case 44100:
-               table = tegra_hdmi_audio_44_1k;
-               break;
-
-       case 48000:
-               table = tegra_hdmi_audio_48k;
-               break;
-
-       case 88200:
-               table = tegra_hdmi_audio_88_2k;
-               break;
-
-       case 96000:
-               table = tegra_hdmi_audio_96k;
-               break;
-
-       case 176400:
-               table = tegra_hdmi_audio_176_4k;
-               break;
-
-       case 192000:
-               table = tegra_hdmi_audio_192k;
-               break;
-
-       default:
-               return NULL;
-       }
-
-       while (table->pclk) {
-               if (table->pclk == pclk)
-                       return table;
-
-               table++;
+       const unsigned int afreq = 128 * audio_freq;
+       const unsigned int min_n = afreq / 1500;
+       const unsigned int max_n = afreq / 300;
+       const unsigned int ideal_n = afreq / 1000;
+       int64_t min_err = (uint64_t)-1 >> 1;
+       unsigned int min_delta = -1;
+       int n;
+
+       memset(config, 0, sizeof(*config));
+       config->n = -1;
+
+       for (n = min_n; n <= max_n; n++) {
+               uint64_t cts_f, aval_f;
+               unsigned int delta;
+               int64_t cts, err;
+
+               /* Compute aval in 48.16 fixed point */
+               aval_f = ((int64_t)24000000 << 16) * n;
+               do_div(aval_f, afreq);
+               /* aval must divide evenly; skip candidates that leave a remainder */
+               if (aval_f & 0xFFFF)
+                       continue;
+
+               /* Compute cts in 48.16 fixed point */
+               cts_f = ((int64_t)pix_clock << 16) * n;
+               do_div(cts_f, afreq);
+               /* Round it to the nearest integer */
+               cts = (cts_f & ~0xFFFF) + ((cts_f & BIT(15)) << 1);
+
+               delta = abs(n - ideal_n);
+
+               /* Compute the absolute error */
+               err = abs((int64_t)cts_f - cts);
+               if (err < min_err || (err == min_err && delta < min_delta)) {
+                       config->n = n;
+                       config->cts = cts >> 16;
+                       config->aval = aval_f >> 16;
+                       min_delta = delta;
+                       min_err = err;
+               }
        }
 
-       return NULL;
+       return config->n != -1 ? 0 : -EINVAL;
 }
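
The rewritten helper replaces the fixed per-pixel-clock tables with a search based on the HDMI Audio Clock Regeneration relation 128 * fs = pclk * N / CTS: N is scanned over the window the HDMI spec allows (128 * fs / 1500 up to 128 * fs / 300), keeping the candidate with the smallest CTS rounding error and, on ties, the N closest to the ideal 128 * fs / 1000. A worked check against one of the removed table rows (my arithmetic, not part of the patch):

        /*
         * fs = 48 kHz, pclk = 25.2 MHz:
         *   afreq   = 128 * 48000 = 6144000
         *   n range = [4096, 20480], ideal_n = 6144
         *   n = 6144: aval = 24000000 * 6144 / 6144000 = 24000 (exact)
         *             cts  = 25200000 * 6144 / 6144000 = 25200 (exact, err 0)
         * -> N = 6144, CTS = 25200, aval = 24000, matching the old
         *    tegra_hdmi_audio_48k entry { 25200000, 6144, 25200, 24000 }.
         */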
 
 static void tegra_hdmi_setup_audio_fs_tables(struct tegra_hdmi *hdmi)
@@ -510,7 +451,7 @@ static void tegra_hdmi_write_aval(struct tegra_hdmi *hdmi, u32 value)
        unsigned int i;
 
        for (i = 0; i < ARRAY_SIZE(regs); i++) {
-               if (regs[i].sample_rate == hdmi->audio_sample_rate) {
+               if (regs[i].sample_rate == hdmi->format.sample_rate) {
                        tegra_hdmi_writel(hdmi, value, regs[i].offset);
                        break;
                }
@@ -519,8 +460,9 @@ static void tegra_hdmi_write_aval(struct tegra_hdmi *hdmi, u32 value)
 
 static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi)
 {
-       const struct tegra_hdmi_audio_config *config;
+       struct tegra_hdmi_audio_config config;
        u32 source, value;
+       int err;
 
        switch (hdmi->audio_source) {
        case HDA:
@@ -564,7 +506,7 @@ static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi)
                 * play back system startup sounds early. It is possibly not
                 * needed on Linux at all.
                 */
-               if (hdmi->audio_channels == 2)
+               if (hdmi->format.channels == 2)
                        value = SOR_AUDIO_CNTRL0_INJECT_NULLSMPL;
                else
                        value = 0;
@@ -595,25 +537,28 @@ static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi)
                tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_SPARE0);
        }
 
-       config = tegra_hdmi_get_audio_config(hdmi->audio_sample_rate,
-                                            hdmi->pixel_clock);
-       if (!config) {
+       err = tegra_hdmi_get_audio_config(hdmi->format.sample_rate,
+                                         hdmi->pixel_clock, &config);
+       if (err < 0) {
                dev_err(hdmi->dev,
                        "cannot set audio to %u Hz at %u Hz pixel clock\n",
-                       hdmi->audio_sample_rate, hdmi->pixel_clock);
-               return -EINVAL;
+                       hdmi->format.sample_rate, hdmi->pixel_clock);
+               return err;
        }
 
+       dev_dbg(hdmi->dev, "audio: pixclk=%u, n=%u, cts=%u, aval=%u\n",
+               hdmi->pixel_clock, config.n, config.cts, config.aval);
+
        tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_HDMI_ACR_CTRL);
 
        value = AUDIO_N_RESETF | AUDIO_N_GENERATE_ALTERNATE |
-               AUDIO_N_VALUE(config->n - 1);
+               AUDIO_N_VALUE(config.n - 1);
        tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N);
 
-       tegra_hdmi_writel(hdmi, ACR_SUBPACK_N(config->n) | ACR_ENABLE,
+       tegra_hdmi_writel(hdmi, ACR_SUBPACK_N(config.n) | ACR_ENABLE,
                          HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
 
-       tegra_hdmi_writel(hdmi, ACR_SUBPACK_CTS(config->cts),
+       tegra_hdmi_writel(hdmi, ACR_SUBPACK_CTS(config.cts),
                          HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
 
        value = SPARE_HW_CTS | SPARE_FORCE_SW_CTS | SPARE_CTS_RESET_VAL(1);
@@ -624,7 +569,7 @@ static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi)
        tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N);
 
        if (hdmi->config->has_hda)
-               tegra_hdmi_write_aval(hdmi, config->aval);
+               tegra_hdmi_write_aval(hdmi, config.aval);
 
        tegra_hdmi_setup_audio_fs_tables(hdmi);
 
@@ -741,7 +686,8 @@ static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
        u8 buffer[17];
        ssize_t err;
 
-       err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
+       err = drm_hdmi_avi_infoframe_from_display_mode(&frame,
+                                                      &hdmi->output.connector, mode);
        if (err < 0) {
                dev_err(hdmi->dev, "failed to setup AVI infoframe: %zd\n", err);
                return;
@@ -787,7 +733,7 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
                return;
        }
 
-       frame.channels = hdmi->audio_channels;
+       frame.channels = hdmi->format.channels;
 
        err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
        if (err < 0) {
@@ -1589,24 +1535,6 @@ static const struct of_device_id tegra_hdmi_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, tegra_hdmi_of_match);
 
-static void hda_format_parse(unsigned int format, unsigned int *rate,
-                            unsigned int *channels)
-{
-       unsigned int mul, div;
-
-       if (format & AC_FMT_BASE_44K)
-               *rate = 44100;
-       else
-               *rate = 48000;
-
-       mul = (format & AC_FMT_MULT_MASK) >> AC_FMT_MULT_SHIFT;
-       div = (format & AC_FMT_DIV_MASK) >> AC_FMT_DIV_SHIFT;
-
-       *rate = *rate * (mul + 1) / (div + 1);
-
-       *channels = (format & AC_FMT_CHAN_MASK) >> AC_FMT_CHAN_SHIFT;
-}
-
 static irqreturn_t tegra_hdmi_irq(int irq, void *data)
 {
        struct tegra_hdmi *hdmi = data;
@@ -1623,14 +1551,9 @@ static irqreturn_t tegra_hdmi_irq(int irq, void *data)
                value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH0);
 
                if (value & SOR_AUDIO_HDA_CODEC_SCRATCH0_VALID) {
-                       unsigned int sample_rate, channels;
-
                        format = value & SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK;
 
-                       hda_format_parse(format, &sample_rate, &channels);
-
-                       hdmi->audio_sample_rate = sample_rate;
-                       hdmi->audio_channels = channels;
+                       tegra_hda_parse_format(format, &hdmi->format);
 
                        err = tegra_hdmi_setup_audio(hdmi);
                        if (err < 0) {
@@ -1664,8 +1587,6 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
        hdmi->dev = &pdev->dev;
 
        hdmi->audio_source = AUTO;
-       hdmi->audio_sample_rate = 48000;
-       hdmi->audio_channels = 2;
        hdmi->stereo = false;
        hdmi->dvi = false;
 
@@ -1709,10 +1630,6 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
                return PTR_ERR(hdmi->vdd);
        }
 
-       hdmi->output.notifier = cec_notifier_get(&pdev->dev);
-       if (hdmi->output.notifier == NULL)
-               return -ENOMEM;
-
        hdmi->output.dev = &pdev->dev;
 
        err = tegra_output_probe(&hdmi->output);
@@ -1771,9 +1688,6 @@ static int tegra_hdmi_remove(struct platform_device *pdev)
 
        tegra_output_remove(&hdmi->output);
 
-       if (hdmi->output.notifier)
-               cec_notifier_put(hdmi->output.notifier);
-
        return 0;
 }
 
index 71cc3cf60066316a6a8686e79cff9fb29fbe26d4..ba9b3cfb8c3d247fae80f8026cc520936e5b954c 100644 (file)
@@ -19,7 +19,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "drm.h"
 #include "dc.h"
index c662efc7e4139323d4c7d40b447b3466e2c5ce97..9c2b9dad55c301d29560b0c6220eb34c5b42f12b 100644 (file)
@@ -36,7 +36,7 @@ int tegra_output_connector_get_modes(struct drm_connector *connector)
        else if (output->ddc)
                edid = drm_get_edid(connector, output->ddc);
 
-       cec_notifier_set_phys_addr_from_edid(output->notifier, edid);
+       cec_notifier_set_phys_addr_from_edid(output->cec, edid);
        drm_connector_update_edid_property(connector, edid);
 
        if (edid) {
@@ -73,7 +73,7 @@ tegra_output_connector_detect(struct drm_connector *connector, bool force)
        }
 
        if (status != connector_status_connected)
-               cec_notifier_phys_addr_invalidate(output->notifier);
+               cec_notifier_phys_addr_invalidate(output->cec);
 
        return status;
 }
@@ -174,11 +174,18 @@ int tegra_output_probe(struct tegra_output *output)
                disable_irq(output->hpd_irq);
        }
 
+       output->cec = cec_notifier_get(output->dev);
+       if (!output->cec)
+               return -ENOMEM;
+
        return 0;
 }
 
 void tegra_output_remove(struct tegra_output *output)
 {
+       if (output->cec)
+               cec_notifier_put(output->cec);
+
        if (gpio_is_valid(output->hpd_gpio)) {
                free_irq(output->hpd_irq, output);
                gpio_free(output->hpd_gpio);
index ef8692b7075ab0f82262b0aae3cb6c57f160d160..40057106f5f39e7c6bb29772e7f7c463b6aa34e1 100644 (file)
@@ -19,8 +19,6 @@
 
 #include <soc/tegra/pmc.h>
 
-#include <sound/hda_verbs.h>
-
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_dp_helper.h>
 #include <drm/drm_panel.h>
@@ -28,6 +26,7 @@
 
 #include "dc.h"
 #include "drm.h"
+#include "hda.h"
 #include "sor.h"
 #include "trace.h"
 
@@ -411,6 +410,8 @@ struct tegra_sor {
        struct clk *clk_dp;
        struct clk *clk;
 
+       u8 xbar_cfg[5];
+
        struct drm_dp_aux *aux;
 
        struct drm_info_list *debugfs_files;
@@ -429,10 +430,7 @@ struct tegra_sor {
        struct delayed_work scdc;
        bool scdc_enabled;
 
-       struct {
-               unsigned int sample_rate;
-               unsigned int channels;
-       } audio;
+       struct tegra_hda_format format;
 };
 
 struct tegra_sor_state {
@@ -1818,7 +1816,7 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
 
        /* XXX not in TRM */
        for (value = 0, i = 0; i < 5; i++)
-               value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->soc->xbar_cfg[i]) |
+               value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->xbar_cfg[i]) |
                         SOR_XBAR_CTRL_LINK1_XSEL(i, i);
 
        tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
@@ -2116,7 +2114,8 @@ tegra_sor_hdmi_setup_avi_infoframe(struct tegra_sor *sor,
        value &= ~INFOFRAME_CTRL_ENABLE;
        tegra_sor_writel(sor, value, SOR_HDMI_AVI_INFOFRAME_CTRL);
 
-       err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
+       err = drm_hdmi_avi_infoframe_from_display_mode(&frame,
+                                                      &sor->output.connector, mode);
        if (err < 0) {
                dev_err(sor->dev, "failed to setup AVI infoframe: %d\n", err);
                return err;
@@ -2185,7 +2184,7 @@ static int tegra_sor_hdmi_enable_audio_infoframe(struct tegra_sor *sor)
                return err;
        }
 
-       frame.channels = sor->audio.channels;
+       frame.channels = sor->format.channels;
 
        err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
        if (err < 0) {
@@ -2214,7 +2213,7 @@ static void tegra_sor_hdmi_audio_enable(struct tegra_sor *sor)
        value |= SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_HDA);
 
        /* inject null samples */
-       if (sor->audio.channels != 2)
+       if (sor->format.channels != 2)
                value &= ~SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
        else
                value |= SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
@@ -2245,7 +2244,7 @@ static void tegra_sor_hdmi_audio_enable(struct tegra_sor *sor)
        value = SOR_HDMI_AUDIO_N_RESET | SOR_HDMI_AUDIO_N_LOOKUP;
        tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_N);
 
-       value = (24000 * 4096) / (128 * sor->audio.sample_rate / 1000);
+       value = (24000 * 4096) / (128 * sor->format.sample_rate / 1000);
        tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_0320);
        tegra_sor_writel(sor, 4096, SOR_AUDIO_NVAL_0320);
 
@@ -2258,15 +2257,15 @@ static void tegra_sor_hdmi_audio_enable(struct tegra_sor *sor)
        tegra_sor_writel(sor, 20000, SOR_AUDIO_AVAL_1764);
        tegra_sor_writel(sor, 18816, SOR_AUDIO_NVAL_1764);
 
-       value = (24000 * 6144) / (128 * sor->audio.sample_rate / 1000);
+       value = (24000 * 6144) / (128 * sor->format.sample_rate / 1000);
        tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_0480);
        tegra_sor_writel(sor, 6144, SOR_AUDIO_NVAL_0480);
 
-       value = (24000 * 12288) / (128 * sor->audio.sample_rate / 1000);
+       value = (24000 * 12288) / (128 * sor->format.sample_rate / 1000);
        tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_0960);
        tegra_sor_writel(sor, 12288, SOR_AUDIO_NVAL_0960);
 
-       value = (24000 * 24576) / (128 * sor->audio.sample_rate / 1000);
+       value = (24000 * 24576) / (128 * sor->format.sample_rate / 1000);
        tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_1920);
        tegra_sor_writel(sor, 24576, SOR_AUDIO_NVAL_1920);
 
@@ -2554,7 +2553,7 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
 
        /* XXX not in TRM */
        for (value = 0, i = 0; i < 5; i++)
-               value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->soc->xbar_cfg[i]) |
+               value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->xbar_cfg[i]) |
                         SOR_XBAR_CTRL_LINK1_XSEL(i, i);
 
        tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
@@ -3175,6 +3174,8 @@ MODULE_DEVICE_TABLE(of, tegra_sor_of_match);
 static int tegra_sor_parse_dt(struct tegra_sor *sor)
 {
        struct device_node *np = sor->dev->of_node;
+       u32 xbar_cfg[5];
+       unsigned int i;
        u32 value;
        int err;
 
@@ -3192,25 +3193,18 @@ static int tegra_sor_parse_dt(struct tegra_sor *sor)
                sor->pad = TEGRA_IO_PAD_HDMI_DP0 + sor->index;
        }
 
-       return 0;
-}
-
-static void tegra_hda_parse_format(unsigned int format, unsigned int *rate,
-                                  unsigned int *channels)
-{
-       unsigned int mul, div;
-
-       if (format & AC_FMT_BASE_44K)
-               *rate = 44100;
-       else
-               *rate = 48000;
-
-       mul = (format & AC_FMT_MULT_MASK) >> AC_FMT_MULT_SHIFT;
-       div = (format & AC_FMT_DIV_MASK) >> AC_FMT_DIV_SHIFT;
-
-       *rate = *rate * (mul + 1) / (div + 1);
+       err = of_property_read_u32_array(np, "nvidia,xbar-cfg", xbar_cfg, 5);
+       if (err < 0) {
+               /* fall back to default per-SoC XBAR configuration */
+               for (i = 0; i < 5; i++)
+                       sor->xbar_cfg[i] = sor->soc->xbar_cfg[i];
+       } else {
+               /* copy cells to SOR XBAR configuration */
+               for (i = 0; i < 5; i++)
+                       sor->xbar_cfg[i] = xbar_cfg[i];
+       }
 
-       *channels = (format & AC_FMT_CHAN_MASK) >> AC_FMT_CHAN_SHIFT;
+       return 0;
 }
 
 static irqreturn_t tegra_sor_irq(int irq, void *data)
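
tegra_sor_parse_dt() now honors an optional five-cell nvidia,xbar-cfg device-tree property and falls back to the per-SoC default when it is absent, which is why the enable paths above switched from sor->soc->xbar_cfg to the per-instance sor->xbar_cfg. A sketch of the effect (cell values purely illustrative):

        /*
         * With a node carrying nvidia,xbar-cfg = <2 1 0 3 4>;,
         * of_property_read_u32_array() fills xbar_cfg[] = {2, 1, 0, 3, 4}
         * and the loop copies it into sor->xbar_cfg. Without the
         * property, sor->soc->xbar_cfg is copied instead, so behavior
         * on existing boards is unchanged.
         */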
@@ -3225,14 +3219,11 @@ static irqreturn_t tegra_sor_irq(int irq, void *data)
                value = tegra_sor_readl(sor, SOR_AUDIO_HDA_CODEC_SCRATCH0);
 
                if (value & SOR_AUDIO_HDA_CODEC_SCRATCH0_VALID) {
-                       unsigned int format, sample_rate, channels;
+                       unsigned int format;
 
                        format = value & SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK;
 
-                       tegra_hda_parse_format(format, &sample_rate, &channels);
-
-                       sor->audio.sample_rate = sample_rate;
-                       sor->audio.channels = channels;
+                       tegra_hda_parse_format(format, &sor->format);
 
                        tegra_sor_hdmi_audio_enable(sor);
                } else {
index d47983deb1cff6ecd5fee2be06b59091c16115d6..39bfed9623de28f0e62a0297f8e84b7151c28238 100644 (file)
@@ -26,6 +26,7 @@
 struct vic_config {
        const char *firmware;
        unsigned int version;
+       bool supports_sid;
 };
 
 struct vic {
@@ -105,6 +106,22 @@ static int vic_boot(struct vic *vic)
        if (vic->booted)
                return 0;
 
+       if (vic->config->supports_sid) {
+               struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
+               u32 value;
+
+               value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) |
+                       TRANSCFG_ATT(0, TRANSCFG_SID_HW);
+               vic_writel(vic, value, VIC_TFBIF_TRANSCFG);
+
+               if (spec && spec->num_ids > 0) {
+                       value = spec->ids[0] & 0xffff;
+
+                       vic_writel(vic, value, VIC_THI_STREAMID0);
+                       vic_writel(vic, value, VIC_THI_STREAMID1);
+               }
+       }
+
        /* setup clockgating registers */
        vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) |
                        CG_IDLE_CG_EN |
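
On the SoCs that set supports_sid (Tegra186/194, per the configs later in this diff), vic_boot() now programs the stream IDs used for memory transactions behind an SMMU: TRANSCFG selects per slot whether the hardware or the Falcon stream ID applies, and THI_STREAMID0/1 take the ID from the IOMMU fwspec. Expanding the value written above with the macros added in vic.h (my arithmetic):

        /*
         * TRANSCFG_ATT(i, v) = ((v) & 0x3) << (i * 4), so
         *   TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) = 2 << 4 = 0x20
         *   TRANSCFG_ATT(0, TRANSCFG_SID_HW)     = 0 << 0 = 0x00
         * value = 0x20: slot 1 uses the Falcon stream ID,
         * slot 0 the hardware stream ID.
         */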
@@ -181,13 +198,6 @@ static int vic_init(struct host1x_client *client)
                vic->domain = tegra->domain;
        }
 
-       if (!vic->falcon.data) {
-               vic->falcon.data = tegra;
-               err = falcon_load_firmware(&vic->falcon);
-               if (err < 0)
-                       goto detach;
-       }
-
        vic->channel = host1x_channel_request(client->dev);
        if (!vic->channel) {
                err = -ENOMEM;
@@ -246,6 +256,30 @@ static const struct host1x_client_ops vic_client_ops = {
        .exit = vic_exit,
 };
 
+static int vic_load_firmware(struct vic *vic)
+{
+       int err;
+
+       if (vic->falcon.data)
+               return 0;
+
+       vic->falcon.data = vic->client.drm;
+
+       err = falcon_read_firmware(&vic->falcon, vic->config->firmware);
+       if (err < 0)
+               goto cleanup;
+
+       err = falcon_load_firmware(&vic->falcon);
+       if (err < 0)
+               goto cleanup;
+
+       return 0;
+
+cleanup:
+       vic->falcon.data = NULL;
+       return err;
+}
+
 static int vic_open_channel(struct tegra_drm_client *client,
                            struct tegra_drm_context *context)
 {
@@ -256,19 +290,25 @@ static int vic_open_channel(struct tegra_drm_client *client,
        if (err < 0)
                return err;
 
+       err = vic_load_firmware(vic);
+       if (err < 0)
+               goto rpm_put;
+
        err = vic_boot(vic);
-       if (err < 0) {
-               pm_runtime_put(vic->dev);
-               return err;
-       }
+       if (err < 0)
+               goto rpm_put;
 
        context->channel = host1x_channel_get(vic->channel);
        if (!context->channel) {
-               pm_runtime_put(vic->dev);
-               return -ENOMEM;
+               err = -ENOMEM;
+               goto rpm_put;
        }
 
        return 0;
+
+rpm_put:
+       pm_runtime_put(vic->dev);
+       return err;
 }
 
 static void vic_close_channel(struct tegra_drm_context *context)
@@ -291,6 +331,7 @@ static const struct tegra_drm_client_ops vic_ops = {
 static const struct vic_config vic_t124_config = {
        .firmware = NVIDIA_TEGRA_124_VIC_FIRMWARE,
        .version = 0x40,
+       .supports_sid = false,
 };
 
 #define NVIDIA_TEGRA_210_VIC_FIRMWARE "nvidia/tegra210/vic04_ucode.bin"
@@ -298,6 +339,7 @@ static const struct vic_config vic_t124_config = {
 static const struct vic_config vic_t210_config = {
        .firmware = NVIDIA_TEGRA_210_VIC_FIRMWARE,
        .version = 0x21,
+       .supports_sid = false,
 };
 
 #define NVIDIA_TEGRA_186_VIC_FIRMWARE "nvidia/tegra186/vic04_ucode.bin"
@@ -305,6 +347,7 @@ static const struct vic_config vic_t210_config = {
 static const struct vic_config vic_t186_config = {
        .firmware = NVIDIA_TEGRA_186_VIC_FIRMWARE,
        .version = 0x18,
+       .supports_sid = true,
 };
 
 #define NVIDIA_TEGRA_194_VIC_FIRMWARE "nvidia/tegra194/vic.bin"
@@ -312,6 +355,7 @@ static const struct vic_config vic_t186_config = {
 static const struct vic_config vic_t194_config = {
        .firmware = NVIDIA_TEGRA_194_VIC_FIRMWARE,
        .version = 0x19,
+       .supports_sid = true,
 };
 
 static const struct of_device_id vic_match[] = {
@@ -372,10 +416,6 @@ static int vic_probe(struct platform_device *pdev)
        if (err < 0)
                return err;
 
-       err = falcon_read_firmware(&vic->falcon, vic->config->firmware);
-       if (err < 0)
-               goto exit_falcon;
-
        platform_set_drvdata(pdev, vic);
 
        INIT_LIST_HEAD(&vic->client.base.list);
@@ -393,7 +433,6 @@ static int vic_probe(struct platform_device *pdev)
        err = host1x_client_register(&vic->client.base);
        if (err < 0) {
                dev_err(dev, "failed to register host1x client: %d\n", err);
-               platform_set_drvdata(pdev, NULL);
                goto exit_falcon;
        }
 
index 21844817a7e1201eab0a5b98e0f88355e0888c8f..017584340dd62c92c592f00a3ce6b37f487a15d0 100644 (file)
 
 /* VIC registers */
 
+#define VIC_THI_STREAMID0      0x00000030
+#define VIC_THI_STREAMID1      0x00000034
+
 #define NV_PVIC_MISC_PRI_VIC_CG                        0x000016d0
 #define CG_IDLE_CG_DLY_CNT(val)                        ((val & 0x3f) << 0)
 #define CG_IDLE_CG_EN                          (1 << 6)
 #define CG_WAKEUP_DLY_CNT(val)                 ((val & 0xf) << 16)
 
+#define VIC_TFBIF_TRANSCFG     0x00002044
+#define  TRANSCFG_ATT(i, v)    (((v) & 0x3) << (i * 4))
+#define  TRANSCFG_SID_HW       0
+#define  TRANSCFG_SID_PHY      1
+#define  TRANSCFG_SID_FALCON   2
+
 /* Firmware offsets */
 
 #define VIC_UCODE_FCE_HEADER_OFFSET            (6*4)
index 337e86a1d5eaa59c358e4cf0393510ea5c045605..3030af9e7b350c47e4b8761f00ab536f53870675 100644 (file)
@@ -24,6 +24,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "tilcdc_drv.h"
 #include "tilcdc_regs.h"
@@ -511,7 +512,7 @@ static int tilcdc_debugfs_init(struct drm_minor *minor)
 DEFINE_DRM_GEM_CMA_FOPS(fops);
 
 static struct drm_driver tilcdc_driver = {
-       .driver_features    = (DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET |
+       .driver_features    = (DRIVER_GEM | DRIVER_MODESET |
                               DRIVER_PRIME | DRIVER_ATOMIC),
        .irq_handler        = tilcdc_irq,
        .gem_free_object_unlocked = drm_gem_cma_free_object,
index 62cea5ff5558b546b0b0adf413ea9a064cfe48a1..d86397da12a9d27cfadc336e2a146cbf6fad3273 100644 (file)
 #include <linux/list.h>
 
 #include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_bridge.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
 
 /* Defaulting to pixel clock defined on AM335x */
 #define TILCDC_DEFAULT_MAX_PIXELCLOCK  126000
index b4eaf9bc87f8e171246f4585dbfd470c114468f4..e9969cd366108090ce9c52e855d3106ca31136b8 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <linux/component.h>
 #include <linux/of_graph.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_of.h>
 
 #include "tilcdc_drv.h"
index a1acab39d87f49385c51edddbaeae87e9745c956..5d532a596e1e6e6f0cac64a28945804010ce2621 100644 (file)
@@ -23,6 +23,7 @@
 #include <video/of_display_timing.h>
 #include <video/videomode.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "tilcdc_drv.h"
 #include "tilcdc_panel.h"
index daebf1aa6b0a841dc28d8b681e591b5445a116c1..fe59fbfdde69976189dbc76ac9ae624e9e15be44 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/pinctrl/pinmux.h>
 #include <linux/pinctrl/consumer.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "tilcdc_drv.h"
 #include "tilcdc_tfp410.h"
index 01a6f2d42440e0c2f3573698abfe6ea70f3408b1..554abd5d3b5382d565ad00b7f1aca1668e156b9e 100644 (file)
@@ -9,12 +9,15 @@
 
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_print.h>
 #include <drm/tinydrm/tinydrm.h>
 #include <linux/device.h>
 #include <linux/dma-buf.h>
+#include <linux/module.h>
 
 /**
  * DOC: overview
  * and registers the DRM device using devm_tinydrm_register().
  */
 
-static struct drm_framebuffer *
-tinydrm_fb_create(struct drm_device *drm, struct drm_file *file_priv,
-                 const struct drm_mode_fb_cmd2 *mode_cmd)
-{
-       struct tinydrm_device *tdev = drm->dev_private;
-
-       return drm_gem_fb_create_with_funcs(drm, file_priv, mode_cmd,
-                                           tdev->fb_funcs);
-}
-
 static const struct drm_mode_config_funcs tinydrm_mode_config_funcs = {
-       .fb_create = tinydrm_fb_create,
+       .fb_create = drm_gem_fb_create_with_dirty,
        .atomic_check = drm_atomic_helper_check,
        .atomic_commit = drm_atomic_helper_commit,
 };
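
drm_gem_fb_create_with_dirty() creates a GEM-backed framebuffer whose .dirty hook is the generic drm_atomic_helper_dirtyfb(), turning userspace DIRTYFB ioctls into atomic commits that carry damage clips, so the per-device fb_funcs indirection and dirty_lock below can go. Roughly equivalent, spelled out (a sketch, not the helper's literal definition):

        /*
         * static const struct drm_framebuffer_funcs funcs = {
         *        .destroy       = drm_gem_fb_destroy,
         *        .create_handle = drm_gem_fb_create_handle,
         *        .dirty         = drm_atomic_helper_dirtyfb,
         * };
         */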
 
 static int tinydrm_init(struct device *parent, struct tinydrm_device *tdev,
-                       const struct drm_framebuffer_funcs *fb_funcs,
                        struct drm_driver *driver)
 {
        struct drm_device *drm;
 
-       mutex_init(&tdev->dirty_lock);
-       tdev->fb_funcs = fb_funcs;
-
        /*
         * We don't embed drm_device, because that prevents us from using
         * devm_kzalloc() to allocate tinydrm_device in the driver since
@@ -83,7 +72,6 @@ static int tinydrm_init(struct device *parent, struct tinydrm_device *tdev,
 static void tinydrm_fini(struct tinydrm_device *tdev)
 {
        drm_mode_config_cleanup(tdev->drm);
-       mutex_destroy(&tdev->dirty_lock);
        tdev->drm->dev_private = NULL;
        drm_dev_put(tdev->drm);
 }
@@ -97,7 +85,6 @@ static void devm_tinydrm_release(void *data)
  * devm_tinydrm_init - Initialize tinydrm device
  * @parent: Parent device object
  * @tdev: tinydrm device
- * @fb_funcs: Framebuffer functions
  * @driver: DRM driver
  *
  * This function initializes @tdev, the underlying DRM device and its
@@ -108,12 +95,11 @@ static void devm_tinydrm_release(void *data)
  * Zero on success, negative error code on failure.
  */
 int devm_tinydrm_init(struct device *parent, struct tinydrm_device *tdev,
-                     const struct drm_framebuffer_funcs *fb_funcs,
                      struct drm_driver *driver)
 {
        int ret;
 
-       ret = tinydrm_init(parent, tdev, fb_funcs, driver);
+       ret = tinydrm_init(parent, tdev, driver);
        if (ret)
                return ret;
 
index bf6bfbc5d412c8c2bb2e8a14fed1d16a139f1b88..2737b6fdadc85d5e60ef939fd5fd82d88ef5d526 100644 (file)
 #include <drm/drm_device.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
 #include <drm/drm_print.h>
-#include <drm/tinydrm/tinydrm.h>
+#include <drm/drm_rect.h>
 #include <drm/tinydrm/tinydrm-helpers.h>
-#include <uapi/drm/drm.h>
 
 static unsigned int spi_max;
 module_param(spi_max, uint, 0400);
 MODULE_PARM_DESC(spi_max, "Set a lower SPI max transfer size");
 
-/**
- * tinydrm_merge_clips - Merge clip rectangles
- * @dst: Destination clip rectangle
- * @src: Source clip rectangle(s)
- * @num_clips: Number of @src clip rectangles
- * @flags: Dirty fb ioctl flags
- * @max_width: Maximum width of @dst
- * @max_height: Maximum height of @dst
- *
- * This function merges @src clip rectangle(s) into @dst. If @src is NULL,
- * @max_width and @min_width is used to set a full @dst clip rectangle.
- *
- * Returns:
- * true if it's a full clip, false otherwise
- */
-bool tinydrm_merge_clips(struct drm_clip_rect *dst,
-                        struct drm_clip_rect *src, unsigned int num_clips,
-                        unsigned int flags, u32 max_width, u32 max_height)
-{
-       unsigned int i;
-
-       if (!src || !num_clips) {
-               dst->x1 = 0;
-               dst->x2 = max_width;
-               dst->y1 = 0;
-               dst->y2 = max_height;
-               return true;
-       }
-
-       dst->x1 = ~0;
-       dst->y1 = ~0;
-       dst->x2 = 0;
-       dst->y2 = 0;
-
-       for (i = 0; i < num_clips; i++) {
-               if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY)
-                       i++;
-               dst->x1 = min(dst->x1, src[i].x1);
-               dst->x2 = max(dst->x2, src[i].x2);
-               dst->y1 = min(dst->y1, src[i].y1);
-               dst->y2 = max(dst->y2, src[i].y2);
-       }
-
-       if (dst->x2 > max_width || dst->y2 > max_height ||
-           dst->x1 >= dst->x2 || dst->y1 >= dst->y2) {
-               DRM_DEBUG_KMS("Illegal clip: x1=%u, x2=%u, y1=%u, y2=%u\n",
-                             dst->x1, dst->x2, dst->y1, dst->y2);
-               dst->x1 = 0;
-               dst->y1 = 0;
-               dst->x2 = max_width;
-               dst->y2 = max_height;
-       }
-
-       return (dst->x2 - dst->x1) == max_width &&
-              (dst->y2 - dst->y1) == max_height;
-}
-EXPORT_SYMBOL(tinydrm_merge_clips);
-
-int tinydrm_fb_dirty(struct drm_framebuffer *fb,
-                    struct drm_file *file_priv,
-                    unsigned int flags, unsigned int color,
-                    struct drm_clip_rect *clips,
-                    unsigned int num_clips)
-{
-       struct tinydrm_device *tdev = fb->dev->dev_private;
-       struct drm_plane *plane = &tdev->pipe.plane;
-       int ret = 0;
-
-       drm_modeset_lock(&plane->mutex, NULL);
-
-       /* fbdev can flush even when we're not interested */
-       if (plane->state->fb == fb) {
-               mutex_lock(&tdev->dirty_lock);
-               ret = tdev->fb_dirty(fb, file_priv, flags,
-                                    color, clips, num_clips);
-               mutex_unlock(&tdev->dirty_lock);
-       }
-
-       drm_modeset_unlock(&plane->mutex);
-
-       if (ret)
-               dev_err_once(fb->dev->dev,
-                            "Failed to update display %d\n", ret);
-
-       return ret;
-}
-EXPORT_SYMBOL(tinydrm_fb_dirty);
-
 /**
  * tinydrm_memcpy - Copy clip buffer
  * @dst: Destination buffer
@@ -122,7 +34,7 @@ EXPORT_SYMBOL(tinydrm_fb_dirty);
  * @clip: Clip rectangle area to copy
  */
 void tinydrm_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb,
-                   struct drm_clip_rect *clip)
+                   struct drm_rect *clip)
 {
        unsigned int cpp = drm_format_plane_cpp(fb->format->format, 0);
        unsigned int pitch = fb->pitches[0];
@@ -146,7 +58,7 @@ EXPORT_SYMBOL(tinydrm_memcpy);
  * @clip: Clip rectangle area to copy
  */
 void tinydrm_swab16(u16 *dst, void *vaddr, struct drm_framebuffer *fb,
-                   struct drm_clip_rect *clip)
+                   struct drm_rect *clip)
 {
        size_t len = (clip->x2 - clip->x1) * sizeof(u16);
        unsigned int x, y;
@@ -186,7 +98,7 @@ EXPORT_SYMBOL(tinydrm_swab16);
  */
 void tinydrm_xrgb8888_to_rgb565(u16 *dst, void *vaddr,
                                struct drm_framebuffer *fb,
-                               struct drm_clip_rect *clip, bool swap)
+                               struct drm_rect *clip, bool swap)
 {
        size_t len = (clip->x2 - clip->x1) * sizeof(u32);
        unsigned int x, y;
@@ -235,7 +147,7 @@ EXPORT_SYMBOL(tinydrm_xrgb8888_to_rgb565);
  * ITU BT.601 is used for the RGB -> luma (brightness) conversion.
  */
 void tinydrm_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb,
-                              struct drm_clip_rect *clip)
+                              struct drm_rect *clip)
 {
        unsigned int len = (clip->x2 - clip->x1) * sizeof(u32);
        unsigned int x, y;
index eacfc0ec8ff1dd0a1db5c9501b495e9c3f9b27d7..bb5b1c1e21ba4156a580c592f70818c387b55be2 100644 (file)
@@ -8,9 +8,11 @@
  */
 
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_modes.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_print.h>
 #include <drm/tinydrm/tinydrm.h>
 
 struct tinydrm_connector {
@@ -108,36 +110,6 @@ tinydrm_connector_create(struct drm_device *drm,
        return connector;
 }
 
-/**
- * tinydrm_display_pipe_update - Display pipe update helper
- * @pipe: Simple display pipe
- * @old_state: Old plane state
- *
- * This function does a full framebuffer flush if the plane framebuffer
- * has changed. It also handles vblank events. Drivers can use this as their
- * &drm_simple_display_pipe_funcs->update callback.
- */
-void tinydrm_display_pipe_update(struct drm_simple_display_pipe *pipe,
-                                struct drm_plane_state *old_state)
-{
-       struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
-       struct drm_framebuffer *fb = pipe->plane.state->fb;
-       struct drm_crtc *crtc = &tdev->pipe.crtc;
-
-       if (fb && (fb != old_state->fb)) {
-               if (tdev->fb_dirty)
-                       tdev->fb_dirty(fb, NULL, 0, 0, NULL, 0);
-       }
-
-       if (crtc->state->event) {
-               spin_lock_irq(&crtc->dev->event_lock);
-               drm_crtc_send_vblank_event(crtc, crtc->state->event);
-               spin_unlock_irq(&crtc->dev->event_lock);
-               crtc->state->event = NULL;
-       }
-}
-EXPORT_SYMBOL(tinydrm_display_pipe_update);
-
 static int tinydrm_rotate_mode(struct drm_display_mode *mode,
                               unsigned int rotation)
 {
index 81a2bbeb25d41cc3c63388f3de50aa7b60f60a51..8bbd0beafc6a415fb134db7ddacb03e62685c2f5 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/property.h>
 #include <linux/spi/spi.h>
 
+#include <drm/drm_drv.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_modeset_helper.h>
@@ -175,7 +176,7 @@ out_enable:
 static const struct drm_simple_display_pipe_funcs hx8357d_pipe_funcs = {
        .enable = yx240qv29_enable,
        .disable = mipi_dbi_pipe_disable,
-       .update = tinydrm_display_pipe_update,
+       .update = mipi_dbi_pipe_update,
        .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
 };
 
index 78f7c2d1b4494078ec42959e22afd5429b06080e..43a3b68d90a20a1f6e023433f16e840d2eb038e9 100644 (file)
 #include <linux/spi/spi.h>
 #include <video/mipi_display.h>
 
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
 #include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_rect.h>
+#include <drm/drm_vblank.h>
 #include <drm/tinydrm/mipi-dbi.h>
 #include <drm/tinydrm/tinydrm-helpers.h>
 
@@ -73,16 +78,14 @@ static inline int ili9225_command(struct mipi_dbi *mipi, u8 cmd, u16 data)
        return mipi_dbi_command_buf(mipi, cmd, par, 2);
 }
 
-static int ili9225_fb_dirty(struct drm_framebuffer *fb,
-                           struct drm_file *file_priv, unsigned int flags,
-                           unsigned int color, struct drm_clip_rect *clips,
-                           unsigned int num_clips)
+static void ili9225_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
 {
        struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
        struct tinydrm_device *tdev = fb->dev->dev_private;
        struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+       unsigned int height = rect->y2 - rect->y1;
+       unsigned int width = rect->x2 - rect->x1;
        bool swap = mipi->swap_bytes;
-       struct drm_clip_rect clip;
        u16 x_start, y_start;
        u16 x1, x2, y1, y2;
        int ret = 0;
@@ -90,54 +93,52 @@ static int ili9225_fb_dirty(struct drm_framebuffer *fb,
        void *tr;
 
        if (!mipi->enabled)
-               return 0;
+               return;
 
-       full = tinydrm_merge_clips(&clip, clips, num_clips, flags,
-                                  fb->width, fb->height);
+       full = width == fb->width && height == fb->height;
 
-       DRM_DEBUG("Flushing [FB:%d] x1=%u, x2=%u, y1=%u, y2=%u\n", fb->base.id,
-                 clip.x1, clip.x2, clip.y1, clip.y2);
+       DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
 
        if (!mipi->dc || !full || swap ||
            fb->format->format == DRM_FORMAT_XRGB8888) {
                tr = mipi->tx_buf;
-               ret = mipi_dbi_buf_copy(mipi->tx_buf, fb, &clip, swap);
+               ret = mipi_dbi_buf_copy(mipi->tx_buf, fb, rect, swap);
                if (ret)
-                       return ret;
+                       goto err_msg;
        } else {
                tr = cma_obj->vaddr;
        }
 
        switch (mipi->rotation) {
        default:
-               x1 = clip.x1;
-               x2 = clip.x2 - 1;
-               y1 = clip.y1;
-               y2 = clip.y2 - 1;
+               x1 = rect->x1;
+               x2 = rect->x2 - 1;
+               y1 = rect->y1;
+               y2 = rect->y2 - 1;
                x_start = x1;
                y_start = y1;
                break;
        case 90:
-               x1 = clip.y1;
-               x2 = clip.y2 - 1;
-               y1 = fb->width - clip.x2;
-               y2 = fb->width - clip.x1 - 1;
+               x1 = rect->y1;
+               x2 = rect->y2 - 1;
+               y1 = fb->width - rect->x2;
+               y2 = fb->width - rect->x1 - 1;
                x_start = x1;
                y_start = y2;
                break;
        case 180:
-               x1 = fb->width - clip.x2;
-               x2 = fb->width - clip.x1 - 1;
-               y1 = fb->height - clip.y2;
-               y2 = fb->height - clip.y1 - 1;
+               x1 = fb->width - rect->x2;
+               x2 = fb->width - rect->x1 - 1;
+               y1 = fb->height - rect->y2;
+               y2 = fb->height - rect->y1 - 1;
                x_start = x2;
                y_start = y2;
                break;
        case 270:
-               x1 = fb->height - clip.y2;
-               x2 = fb->height - clip.y1 - 1;
-               y1 = clip.x1;
-               y2 = clip.x2 - 1;
+               x1 = fb->height - rect->y2;
+               x2 = fb->height - rect->y1 - 1;
+               y1 = rect->x1;
+               y2 = rect->x2 - 1;
                x_start = x2;
                y_start = y1;
                break;
@@ -152,16 +153,29 @@ static int ili9225_fb_dirty(struct drm_framebuffer *fb,
        ili9225_command(mipi, ILI9225_RAM_ADDRESS_SET_2, y_start);
 
        ret = mipi_dbi_command_buf(mipi, ILI9225_WRITE_DATA_TO_GRAM, tr,
-                               (clip.x2 - clip.x1) * (clip.y2 - clip.y1) * 2);
-
-       return ret;
+                                  width * height * 2);
+err_msg:
+       if (ret)
+               dev_err_once(fb->dev->dev, "Failed to update display %d\n", ret);
 }
 
-static const struct drm_framebuffer_funcs ili9225_fb_funcs = {
-       .destroy        = drm_gem_fb_destroy,
-       .create_handle  = drm_gem_fb_create_handle,
-       .dirty          = tinydrm_fb_dirty,
-};
+static void ili9225_pipe_update(struct drm_simple_display_pipe *pipe,
+                               struct drm_plane_state *old_state)
+{
+       struct drm_plane_state *state = pipe->plane.state;
+       struct drm_crtc *crtc = &pipe->crtc;
+       struct drm_rect rect;
+
+       if (drm_atomic_helper_damage_merged(old_state, state, &rect))
+               ili9225_fb_dirty(state->fb, &rect);
+
+       if (crtc->state->event) {
+               spin_lock_irq(&crtc->dev->event_lock);
+               drm_crtc_send_vblank_event(crtc, crtc->state->event);
+               spin_unlock_irq(&crtc->dev->event_lock);
+               crtc->state->event = NULL;
+       }
+}
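
ili9225_pipe_update() is the damage-based replacement for the removed tinydrm_display_pipe_update(): drm_atomic_helper_damage_merged() folds all damage clips of the commit into one bounding drm_rect and returns false when nothing needs flushing, while the vblank event handling stays as before. For the clips to reach the plane state the plane must opt in via drm_plane_enable_fb_damage_clips(), which this series wires up in the shared tinydrm pipe setup (not shown in this excerpt).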
 
 static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
                                struct drm_crtc_state *crtc_state,
@@ -169,7 +183,14 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
 {
        struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
        struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+       struct drm_framebuffer *fb = plane_state->fb;
        struct device *dev = tdev->drm->dev;
+       struct drm_rect rect = {
+               .x1 = 0,
+               .x2 = fb->width,
+               .y1 = 0,
+               .y2 = fb->height,
+       };
        int ret;
        u8 am_id;
 
@@ -257,7 +278,8 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
 
        ili9225_command(mipi, ILI9225_DISPLAY_CONTROL_1, 0x1017);
 
-       mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+       mipi->enabled = true;
+       ili9225_fb_dirty(fb, &rect);
 }
 
 static void ili9225_pipe_disable(struct drm_simple_display_pipe *pipe)
@@ -302,59 +324,10 @@ static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 cmd, u8 *par,
        return tinydrm_spi_transfer(spi, speed_hz, NULL, bpw, par, num);
 }
 
-static const u32 ili9225_formats[] = {
-       DRM_FORMAT_RGB565,
-       DRM_FORMAT_XRGB8888,
-};
-
-static int ili9225_init(struct device *dev, struct mipi_dbi *mipi,
-                       const struct drm_simple_display_pipe_funcs *pipe_funcs,
-                       struct drm_driver *driver,
-                       const struct drm_display_mode *mode,
-                       unsigned int rotation)
-{
-       size_t bufsize = mode->vdisplay * mode->hdisplay * sizeof(u16);
-       struct tinydrm_device *tdev = &mipi->tinydrm;
-       int ret;
-
-       if (!mipi->command)
-               return -EINVAL;
-
-       mutex_init(&mipi->cmdlock);
-
-       mipi->tx_buf = devm_kmalloc(dev, bufsize, GFP_KERNEL);
-       if (!mipi->tx_buf)
-               return -ENOMEM;
-
-       ret = devm_tinydrm_init(dev, tdev, &ili9225_fb_funcs, driver);
-       if (ret)
-               return ret;
-
-       tdev->fb_dirty = ili9225_fb_dirty;
-
-       ret = tinydrm_display_pipe_init(tdev, pipe_funcs,
-                                       DRM_MODE_CONNECTOR_VIRTUAL,
-                                       ili9225_formats,
-                                       ARRAY_SIZE(ili9225_formats), mode,
-                                       rotation);
-       if (ret)
-               return ret;
-
-       tdev->drm->mode_config.preferred_depth = 16;
-       mipi->rotation = rotation;
-
-       drm_mode_config_reset(tdev->drm);
-
-       DRM_DEBUG_KMS("preferred_depth=%u, rotation = %u\n",
-                     tdev->drm->mode_config.preferred_depth, rotation);
-
-       return 0;
-}
-
 static const struct drm_simple_display_pipe_funcs ili9225_pipe_funcs = {
        .enable         = ili9225_pipe_enable,
        .disable        = ili9225_pipe_disable,
-       .update         = tinydrm_display_pipe_update,
+       .update         = ili9225_pipe_update,
        .prepare_fb     = drm_gem_fb_simple_display_pipe_prepare_fb,
 };
 
@@ -421,8 +394,8 @@ static int ili9225_probe(struct spi_device *spi)
        /* override the command function set in mipi_dbi_spi_init() */
        mipi->command = ili9225_dbi_command;
 
-       ret = ili9225_init(&spi->dev, mipi, &ili9225_pipe_funcs,
-                          &ili9225_driver, &ili9225_mode, rotation);
+       ret = mipi_dbi_init(&spi->dev, mipi, &ili9225_pipe_funcs,
+                           &ili9225_driver, &ili9225_mode, rotation);
        if (ret)
                return ret;
 
index 51395bdc6ca22a731d5bf9cd70d4bc29ca1387d1..713bb2dd7e04c6582ab516dbd41371b922b862b4 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/property.h>
 #include <linux/spi/spi.h>
 
+#include <drm/drm_drv.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_modeset_helper.h>
@@ -131,7 +132,7 @@ out_enable:
 static const struct drm_simple_display_pipe_funcs ili9341_pipe_funcs = {
        .enable = yx240qv29_enable,
        .disable = mipi_dbi_pipe_disable,
-       .update = tinydrm_display_pipe_update,
+       .update = mipi_dbi_pipe_update,
        .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
 };
 
index 3fa62e77c30b2ceff4a3aeaf9c6f5b8d7eafce6a..82a92ec9ae3cedbe776a1bd9997c8b186d110918 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/spi/spi.h>
 
+#include <drm/drm_drv.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_modeset_helper.h>
@@ -139,7 +140,7 @@ out_enable:
 static const struct drm_simple_display_pipe_funcs mi0283qt_pipe_funcs = {
        .enable = mi0283qt_enable,
        .disable = mipi_dbi_pipe_disable,
-       .update = tinydrm_display_pipe_update,
+       .update = mipi_dbi_pipe_update,
        .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
 };
 
index 3a05e56f9b0d81c0cd15780478927086ee3af3c9..918f77c7de34e149080808e0161bad941bd8b3e3 100644 (file)
  */
 
 #include <linux/debugfs.h>
+#include <linux/delay.h>
 #include <linux/dma-buf.h>
 #include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <linux/regulator/consumer.h>
 #include <linux/spi/spi.h>
 
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fourcc.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_rect.h>
 #include <drm/tinydrm/mipi-dbi.h>
 #include <drm/tinydrm/tinydrm-helpers.h>
-#include <uapi/drm/drm.h>
 #include <video/mipi_display.h>
 
 #define MIPI_DBI_MAX_SPI_READ_SPEED 2000000 /* 2MHz */
@@ -169,7 +174,7 @@ EXPORT_SYMBOL(mipi_dbi_command_buf);
  * Zero on success, negative error code on failure.
  */
 int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
-                     struct drm_clip_rect *clip, bool swap)
+                     struct drm_rect *clip, bool swap)
 {
        struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
        struct dma_buf_attachment *import_attach = cma_obj->base.import_attach;
@@ -208,58 +213,75 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
 }
 EXPORT_SYMBOL(mipi_dbi_buf_copy);
 
-static int mipi_dbi_fb_dirty(struct drm_framebuffer *fb,
-                            struct drm_file *file_priv,
-                            unsigned int flags, unsigned int color,
-                            struct drm_clip_rect *clips,
-                            unsigned int num_clips)
+static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
 {
        struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
        struct tinydrm_device *tdev = fb->dev->dev_private;
        struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+       unsigned int height = rect->y2 - rect->y1;
+       unsigned int width = rect->x2 - rect->x1;
        bool swap = mipi->swap_bytes;
-       struct drm_clip_rect clip;
        int ret = 0;
        bool full;
        void *tr;
 
        if (!mipi->enabled)
-               return 0;
+               return;
 
-       full = tinydrm_merge_clips(&clip, clips, num_clips, flags,
-                                  fb->width, fb->height);
+       full = width == fb->width && height == fb->height;
 
-       DRM_DEBUG("Flushing [FB:%d] x1=%u, x2=%u, y1=%u, y2=%u\n", fb->base.id,
-                 clip.x1, clip.x2, clip.y1, clip.y2);
+       DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
 
        if (!mipi->dc || !full || swap ||
            fb->format->format == DRM_FORMAT_XRGB8888) {
                tr = mipi->tx_buf;
-               ret = mipi_dbi_buf_copy(mipi->tx_buf, fb, &clip, swap);
+               ret = mipi_dbi_buf_copy(mipi->tx_buf, fb, rect, swap);
                if (ret)
-                       return ret;
+                       goto err_msg;
        } else {
                tr = cma_obj->vaddr;
        }
 
        mipi_dbi_command(mipi, MIPI_DCS_SET_COLUMN_ADDRESS,
-                        (clip.x1 >> 8) & 0xFF, clip.x1 & 0xFF,
-                        ((clip.x2 - 1) >> 8) & 0xFF, (clip.x2 - 1) & 0xFF);
+                        (rect->x1 >> 8) & 0xff, rect->x1 & 0xff,
+                        ((rect->x2 - 1) >> 8) & 0xff, (rect->x2 - 1) & 0xff);
        mipi_dbi_command(mipi, MIPI_DCS_SET_PAGE_ADDRESS,
-                        (clip.y1 >> 8) & 0xFF, clip.y1 & 0xFF,
-                        ((clip.y2 - 1) >> 8) & 0xFF, (clip.y2 - 1) & 0xFF);
+                        (rect->y1 >> 8) & 0xff, rect->y1 & 0xff,
+                        ((rect->y2 - 1) >> 8) & 0xff, (rect->y2 - 1) & 0xff);
 
        ret = mipi_dbi_command_buf(mipi, MIPI_DCS_WRITE_MEMORY_START, tr,
-                               (clip.x2 - clip.x1) * (clip.y2 - clip.y1) * 2);
-
-       return ret;
+                                  width * height * 2);
+err_msg:
+       if (ret)
+               dev_err_once(fb->dev->dev, "Failed to update display %d\n", ret);
 }
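
For reference, the copy-vs-direct decision above reads more easily inverted: the CMA backing store is sent to the controller as-is only when no per-pixel work is needed at all. The same condition, restated as a sketch (not part of the patch):

	/* Direct scanout of cma_obj->vaddr is possible only when all hold: */
	bool direct = mipi->dc &&	/* D/C gpio present, no 9-bit emulation */
		      full &&		/* full-frame flush */
		      !swap &&		/* no byte swapping needed */
		      fb->format->format != DRM_FORMAT_XRGB8888; /* already RGB565 */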
 
-static const struct drm_framebuffer_funcs mipi_dbi_fb_funcs = {
-       .destroy        = drm_gem_fb_destroy,
-       .create_handle  = drm_gem_fb_create_handle,
-       .dirty          = tinydrm_fb_dirty,
-};
+/**
+ * mipi_dbi_pipe_update - Display pipe update helper
+ * @pipe: Simple display pipe
+ * @old_state: Old plane state
+ *
+ * This function handles framebuffer flushing and vblank events. Drivers can use
+ * this as their &drm_simple_display_pipe_funcs->update callback.
+ */
+void mipi_dbi_pipe_update(struct drm_simple_display_pipe *pipe,
+                         struct drm_plane_state *old_state)
+{
+       struct drm_plane_state *state = pipe->plane.state;
+       struct drm_crtc *crtc = &pipe->crtc;
+       struct drm_rect rect;
+
+       if (drm_atomic_helper_damage_merged(old_state, state, &rect))
+               mipi_dbi_fb_dirty(state->fb, &rect);
+
+       if (crtc->state->event) {
+               spin_lock_irq(&crtc->dev->event_lock);
+               drm_crtc_send_vblank_event(crtc, crtc->state->event);
+               spin_unlock_irq(&crtc->dev->event_lock);
+               crtc->state->event = NULL;
+       }
+}
+EXPORT_SYMBOL(mipi_dbi_pipe_update);
 
 /**
  * mipi_dbi_enable_flush - MIPI DBI enable helper
@@ -270,18 +292,25 @@ static const struct drm_framebuffer_funcs mipi_dbi_fb_funcs = {
  * This function sets &mipi_dbi->enabled, flushes the whole framebuffer and
  * enables the backlight. Drivers can use this in their
  * &drm_simple_display_pipe_funcs->enable callback.
+ *
+ * Note: Drivers that implement their own framebuffer flushing instead of using
+ * mipi_dbi_pipe_update() can't use this function either, since both helpers
+ * share the same flushing code.
  */
 void mipi_dbi_enable_flush(struct mipi_dbi *mipi,
                           struct drm_crtc_state *crtc_state,
                           struct drm_plane_state *plane_state)
 {
-       struct tinydrm_device *tdev = &mipi->tinydrm;
        struct drm_framebuffer *fb = plane_state->fb;
+       struct drm_rect rect = {
+               .x1 = 0,
+               .x2 = fb->width,
+               .y1 = 0,
+               .y2 = fb->height,
+       };
 
        mipi->enabled = true;
-       if (fb)
-               tdev->fb_dirty(fb, NULL, 0, 0, NULL, 0);
-
+       mipi_dbi_fb_dirty(fb, &rect);
        backlight_enable(mipi->backlight);
 }
 EXPORT_SYMBOL(mipi_dbi_enable_flush);
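
A minimal sketch of an enable callback built on this helper, assuming a hypothetical my_panel_init() that programs the controller:

	static void my_pipe_enable(struct drm_simple_display_pipe *pipe,
				   struct drm_crtc_state *crtc_state,
				   struct drm_plane_state *plane_state)
	{
		struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
		struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);

		my_panel_init(mipi);	/* hypothetical controller setup */
		mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
	}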
@@ -373,12 +402,10 @@ int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi,
        if (!mipi->tx_buf)
                return -ENOMEM;
 
-       ret = devm_tinydrm_init(dev, tdev, &mipi_dbi_fb_funcs, driver);
+       ret = devm_tinydrm_init(dev, tdev, driver);
        if (ret)
                return ret;
 
-       tdev->fb_dirty = mipi_dbi_fb_dirty;
-
        /* TODO: Maybe add DRM_MODE_CONNECTOR_SPI */
        ret = tinydrm_display_pipe_init(tdev, pipe_funcs,
                                        DRM_MODE_CONNECTOR_VIRTUAL,
@@ -388,6 +415,8 @@ int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi,
        if (ret)
                return ret;
 
+       drm_plane_enable_fb_damage_clips(&tdev->pipe.plane);
+
        tdev->drm->mode_config.preferred_depth = 16;
        mipi->rotation = rotation;
 
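
drm_plane_enable_fb_damage_clips() exposes the FB_DAMAGE_CLIPS plane property, which is what feeds the damage rectangles merged by mipi_dbi_pipe_update() above. A rough userspace sketch (libdrm calls from memory; treat the exact sequence as an assumption):

	/* Hypothetical: prop_id previously looked up on the plane. */
	struct drm_mode_rect clip = { .x1 = 0, .y1 = 0, .x2 = 64, .y2 = 64 };
	uint32_t blob_id;

	drmModeCreatePropertyBlob(fd, &clip, sizeof(clip), &blob_id);
	drmModeAtomicAddProperty(req, plane_id, prop_id, blob_id);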
index 54d6fe0f37ce8b2a72133027d4a313bd4458557c..b037c6540cf389de2dd8777ab909309bcdf83a10 100644 (file)
 #include <linux/spi/spi.h>
 #include <linux/thermal.h>
 
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_rect.h>
+#include <drm/drm_vblank.h>
 #include <drm/tinydrm/tinydrm.h>
 #include <drm/tinydrm/tinydrm-helpers.h>
 
@@ -521,17 +525,13 @@ static void repaper_gray8_to_mono_reversed(u8 *buf, u32 width, u32 height)
                }
 }
 
-static int repaper_fb_dirty(struct drm_framebuffer *fb,
-                           struct drm_file *file_priv,
-                           unsigned int flags, unsigned int color,
-                           struct drm_clip_rect *clips,
-                           unsigned int num_clips)
+static int repaper_fb_dirty(struct drm_framebuffer *fb)
 {
        struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
        struct dma_buf_attachment *import_attach = cma_obj->base.import_attach;
        struct tinydrm_device *tdev = fb->dev->dev_private;
        struct repaper_epd *epd = epd_from_tinydrm(tdev);
-       struct drm_clip_rect clip;
+       struct drm_rect clip;
        u8 *buf = NULL;
        int ret = 0;
 
@@ -624,12 +624,6 @@ out_free:
        return ret;
 }
 
-static const struct drm_framebuffer_funcs repaper_fb_funcs = {
-       .destroy        = drm_gem_fb_destroy,
-       .create_handle  = drm_gem_fb_create_handle,
-       .dirty          = tinydrm_fb_dirty,
-};
-
 static void power_off(struct repaper_epd *epd)
 {
        /* Turn off power and all signals */
@@ -793,9 +787,7 @@ static void repaper_pipe_disable(struct drm_simple_display_pipe *pipe)
 
        DRM_DEBUG_DRIVER("\n");
 
-       mutex_lock(&tdev->dirty_lock);
        epd->enabled = false;
-       mutex_unlock(&tdev->dirty_lock);
 
        /* Nothing frame */
        for (line = 0; line < epd->height; line++)
@@ -838,10 +830,28 @@ static void repaper_pipe_disable(struct drm_simple_display_pipe *pipe)
        power_off(epd);
 }
 
+static void repaper_pipe_update(struct drm_simple_display_pipe *pipe,
+                               struct drm_plane_state *old_state)
+{
+       struct drm_plane_state *state = pipe->plane.state;
+       struct drm_crtc *crtc = &pipe->crtc;
+       struct drm_rect rect;
+
+       if (drm_atomic_helper_damage_merged(old_state, state, &rect))
+               repaper_fb_dirty(state->fb);
+
+       if (crtc->state->event) {
+               spin_lock_irq(&crtc->dev->event_lock);
+               drm_crtc_send_vblank_event(crtc, crtc->state->event);
+               spin_unlock_irq(&crtc->dev->event_lock);
+               crtc->state->event = NULL;
+       }
+}
+
 static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = {
        .enable = repaper_pipe_enable,
        .disable = repaper_pipe_disable,
-       .update = tinydrm_display_pipe_update,
+       .update = repaper_pipe_update,
        .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
 };
 
@@ -1055,12 +1065,10 @@ static int repaper_probe(struct spi_device *spi)
 
        tdev = &epd->tinydrm;
 
-       ret = devm_tinydrm_init(dev, tdev, &repaper_fb_funcs, &repaper_driver);
+       ret = devm_tinydrm_init(dev, tdev, &repaper_driver);
        if (ret)
                return ret;
 
-       tdev->fb_dirty = repaper_fb_dirty;
-
        ret = tinydrm_display_pipe_init(tdev, &repaper_pipe_funcs,
                                        DRM_MODE_CONNECTOR_VIRTUAL,
                                        repaper_formats,
index a6a8a1081b73de27256d12b00bda06a4204a4017..01a8077954b341046087ae0d8c6230e10ff18707 100644 (file)
 #include <linux/spi/spi.h>
 #include <video/mipi_display.h>
 
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_rect.h>
+#include <drm/drm_vblank.h>
 #include <drm/tinydrm/mipi-dbi.h>
 #include <drm/tinydrm/tinydrm-helpers.h>
 
@@ -61,7 +65,7 @@ static const u8 st7586_lookup[] = { 0x7, 0x4, 0x2, 0x0 };
 
 static void st7586_xrgb8888_to_gray332(u8 *dst, void *vaddr,
                                       struct drm_framebuffer *fb,
-                                      struct drm_clip_rect *clip)
+                                      struct drm_rect *clip)
 {
        size_t len = (clip->x2 - clip->x1) * (clip->y2 - clip->y1);
        unsigned int x, y;
@@ -87,7 +91,7 @@ static void st7586_xrgb8888_to_gray332(u8 *dst, void *vaddr,
 }
 
 static int st7586_buf_copy(void *dst, struct drm_framebuffer *fb,
-                          struct drm_clip_rect *clip)
+                          struct drm_rect *clip)
 {
        struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
        struct dma_buf_attachment *import_attach = cma_obj->base.import_attach;
@@ -110,57 +114,62 @@ static int st7586_buf_copy(void *dst, struct drm_framebuffer *fb,
        return ret;
 }
 
-static int st7586_fb_dirty(struct drm_framebuffer *fb,
-                          struct drm_file *file_priv, unsigned int flags,
-                          unsigned int color, struct drm_clip_rect *clips,
-                          unsigned int num_clips)
+static void st7586_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
 {
        struct tinydrm_device *tdev = fb->dev->dev_private;
        struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
-       struct drm_clip_rect clip;
        int start, end;
        int ret = 0;
 
        if (!mipi->enabled)
-               return 0;
-
-       tinydrm_merge_clips(&clip, clips, num_clips, flags, fb->width,
-                           fb->height);
+               return;
 
        /* 3 pixels per byte, so grow clip to nearest multiple of 3 */
-       clip.x1 = rounddown(clip.x1, 3);
-       clip.x2 = roundup(clip.x2, 3);
+       rect->x1 = rounddown(rect->x1, 3);
+       rect->x2 = roundup(rect->x2, 3);
 
-       DRM_DEBUG("Flushing [FB:%d] x1=%u, x2=%u, y1=%u, y2=%u\n", fb->base.id,
-                 clip.x1, clip.x2, clip.y1, clip.y2);
+       DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
 
-       ret = st7586_buf_copy(mipi->tx_buf, fb, &clip);
+       ret = st7586_buf_copy(mipi->tx_buf, fb, rect);
        if (ret)
-               return ret;
+               goto err_msg;
 
        /* Pixels are packed 3 per byte */
-       start = clip.x1 / 3;
-       end = clip.x2 / 3;
+       start = rect->x1 / 3;
+       end = rect->x2 / 3;
 
        mipi_dbi_command(mipi, MIPI_DCS_SET_COLUMN_ADDRESS,
                         (start >> 8) & 0xFF, start & 0xFF,
                         (end >> 8) & 0xFF, (end - 1) & 0xFF);
        mipi_dbi_command(mipi, MIPI_DCS_SET_PAGE_ADDRESS,
-                        (clip.y1 >> 8) & 0xFF, clip.y1 & 0xFF,
-                        (clip.y2 >> 8) & 0xFF, (clip.y2 - 1) & 0xFF);
+                        (rect->y1 >> 8) & 0xFF, rect->y1 & 0xFF,
+                        (rect->y2 >> 8) & 0xFF, (rect->y2 - 1) & 0xFF);
 
        ret = mipi_dbi_command_buf(mipi, MIPI_DCS_WRITE_MEMORY_START,
                                   (u8 *)mipi->tx_buf,
-                                  (end - start) * (clip.y2 - clip.y1));
-
-       return ret;
+                                  (end - start) * (rect->y2 - rect->y1));
+err_msg:
+       if (ret)
+               dev_err_once(fb->dev->dev, "Failed to update display %d\n", ret);
 }
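
The clip rounding above keeps the rectangle aligned to whole bytes in the ST7586's packed format. A worked example, assuming a damage rect with x1 = 10, x2 = 20:

	rect->x1 = rounddown(10, 3);	/* ->  9 */
	rect->x2 = roundup(20, 3);	/* -> 21 */
	start = 9 / 3;			/* ->  3 */
	end = 21 / 3;			/* ->  7, so (end - start) = 4 bytes per line */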
 
-static const struct drm_framebuffer_funcs st7586_fb_funcs = {
-       .destroy        = drm_gem_fb_destroy,
-       .create_handle  = drm_gem_fb_create_handle,
-       .dirty          = tinydrm_fb_dirty,
-};
+static void st7586_pipe_update(struct drm_simple_display_pipe *pipe,
+                              struct drm_plane_state *old_state)
+{
+       struct drm_plane_state *state = pipe->plane.state;
+       struct drm_crtc *crtc = &pipe->crtc;
+       struct drm_rect rect;
+
+       if (drm_atomic_helper_damage_merged(old_state, state, &rect))
+               st7586_fb_dirty(state->fb, &rect);
+
+       if (crtc->state->event) {
+               spin_lock_irq(&crtc->dev->event_lock);
+               drm_crtc_send_vblank_event(crtc, crtc->state->event);
+               spin_unlock_irq(&crtc->dev->event_lock);
+               crtc->state->event = NULL;
+       }
+}
 
 static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
                               struct drm_crtc_state *crtc_state,
@@ -168,6 +177,13 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
 {
        struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
        struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+       struct drm_framebuffer *fb = plane_state->fb;
+       struct drm_rect rect = {
+               .x1 = 0,
+               .x2 = fb->width,
+               .y1 = 0,
+               .y2 = fb->height,
+       };
        int ret;
        u8 addr_mode;
 
@@ -224,9 +240,10 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
 
        msleep(100);
 
-       mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
+       mipi->enabled = true;
+       st7586_fb_dirty(fb, &rect);
 
-       mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+       mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
 }
 
 static void st7586_pipe_disable(struct drm_simple_display_pipe *pipe)
@@ -262,12 +279,10 @@ static int st7586_init(struct device *dev, struct mipi_dbi *mipi,
        if (!mipi->tx_buf)
                return -ENOMEM;
 
-       ret = devm_tinydrm_init(dev, tdev, &st7586_fb_funcs, driver);
+       ret = devm_tinydrm_init(dev, tdev, driver);
        if (ret)
                return ret;
 
-       tdev->fb_dirty = st7586_fb_dirty;
-
        ret = tinydrm_display_pipe_init(tdev, pipe_funcs,
                                        DRM_MODE_CONNECTOR_VIRTUAL,
                                        st7586_formats,
@@ -276,6 +291,8 @@ static int st7586_init(struct device *dev, struct mipi_dbi *mipi,
        if (ret)
                return ret;
 
+       drm_plane_enable_fb_damage_clips(&tdev->pipe.plane);
+
        tdev->drm->mode_config.preferred_depth = 32;
        mipi->rotation = rotation;
 
@@ -290,7 +307,7 @@ static int st7586_init(struct device *dev, struct mipi_dbi *mipi,
 static const struct drm_simple_display_pipe_funcs st7586_pipe_funcs = {
        .enable         = st7586_pipe_enable,
        .disable        = st7586_pipe_disable,
-       .update         = tinydrm_display_pipe_update,
+       .update         = st7586_pipe_update,
        .prepare_fb     = drm_gem_fb_simple_display_pipe_prepare_fb,
 };
 
index b39779e0dcd8765c58be58b97e63c60848aa82ea..3bab9a9569a67744b86e856368a6cfbb30401e77 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/spi/spi.h>
 #include <video/mipi_display.h>
 
+#include <drm/drm_drv.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/tinydrm/mipi-dbi.h>
@@ -105,7 +106,7 @@ static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
 static const struct drm_simple_display_pipe_funcs jd_t18003_t01_pipe_funcs = {
        .enable         = jd_t18003_t01_pipe_enable,
        .disable        = mipi_dbi_pipe_disable,
-       .update         = tinydrm_display_pipe_update,
+       .update         = mipi_dbi_pipe_update,
        .prepare_fb     = drm_gem_fb_simple_display_pipe_prepare_fb,
 };
 
index 28e2d03c0ccffa5b7403ec723ea0a676638872cb..d5c6a7ecf232daed95c6e4cd524239b2058c1882 100644 (file)
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_panel.h>
 #include <drm/drm_of.h>
-#include <drm/drm_bridge.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
 
 #include "tve200_drm.h"
 
index 68e88bed77ca795dbb3537599e76fdab86e62128..66885c24590f0147ce1a510991a546c4f2bbe427 100644 (file)
@@ -14,6 +14,7 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include "udl_connector.h"
 #include "udl_drv.h"
 
index a63e3011e97163fb6cd738bb56b98f3beb1ba838..22cd2d13e272f033d3e54b9245986ce22fa74486 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/module.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include "udl_drv.h"
 
 static int udl_usb_suspend(struct usb_interface *interface,
index 1b014d92855b93991b17911b1fe40e823585dba1..9086d0d1b880de87de7609e55798e534b5cb3039 100644 (file)
@@ -12,6 +12,7 @@
  */
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include "udl_drv.h"
 
 /* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */
index 97caf1671dd06f883db650de9c52253e0680aa82..730008d3da761e2eb37d9821c518672567a6e1c6 100644 (file)
@@ -34,8 +34,8 @@
 
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_atomic_uapi.h>
+#include <drm/drm_probe_helper.h>
 #include <linux/clk.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <linux/component.h>
index f185812970da7a44aa7b82d6ae88942ae673ec9a..169521e547bafe40fb36a6c52bab8ce8288bd083 100644 (file)
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_bridge.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
 #include <linux/clk.h>
 #include <linux/component.h>
 #include <linux/of_graph.h>
index f6f5cd80c04de355da85295457fc2a1a4011cdd9..5fcd2f0da7f7c56236150a1c2f4784ab6094a9eb 100644 (file)
@@ -175,7 +175,6 @@ static struct drm_driver vc4_drm_driver = {
        .driver_features = (DRIVER_MODESET |
                            DRIVER_ATOMIC |
                            DRIVER_GEM |
-                           DRIVER_HAVE_IRQ |
                            DRIVER_RENDER |
                            DRIVER_PRIME |
                            DRIVER_SYNCOBJ),
index c24b078f05939817ff0f94dba61ed6491eefd4a1..2c635f001c711b03b83614d1fba7fbe472b094dc 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/mm_types.h>
 #include <linux/reservation.h>
 #include <drm/drmP.h>
+#include <drm/drm_util.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_atomic.h>
index 0c607eb33d7e0515016e117717a8388aea5330d8..11702e1d90117e5719360f4f0f8694c00b215130 100644 (file)
  */
 
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_mipi_dsi.h>
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/completion.h>
index 2f276222e30fa6d130711124ab06c91538520e3f..88fd5df7e7dc65fe1cbd23fe4d40e42253a052f3 100644 (file)
@@ -43,8 +43,8 @@
  */
 
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
+#include <drm/drm_probe_helper.h>
 #include <linux/clk.h>
 #include <linux/component.h>
 #include <linux/i2c.h>
@@ -109,7 +109,6 @@ struct vc4_hdmi_encoder {
        struct vc4_encoder base;
        bool hdmi_monitor;
        bool limited_rgb_range;
-       bool rgb_range_selectable;
 };
 
 static inline struct vc4_hdmi_encoder *
@@ -280,11 +279,6 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
 
        vc4_encoder->hdmi_monitor = drm_detect_hdmi_monitor(edid);
 
-       if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
-               vc4_encoder->rgb_range_selectable =
-                       drm_rgb_quant_range_selectable(edid);
-       }
-
        drm_connector_update_edid_property(connector, edid);
        ret = drm_add_edid_modes(connector, edid);
        kfree(edid);
@@ -424,18 +418,18 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
        union hdmi_infoframe frame;
        int ret;
 
-       ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode, false);
+       ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+                                                      hdmi->connector, mode);
        if (ret < 0) {
                DRM_ERROR("couldn't fill AVI infoframe\n");
                return;
        }
 
-       drm_hdmi_avi_infoframe_quant_range(&frame.avi, mode,
+       drm_hdmi_avi_infoframe_quant_range(&frame.avi,
+                                          hdmi->connector, mode,
                                           vc4_encoder->limited_rgb_range ?
                                           HDMI_QUANTIZATION_RANGE_LIMITED :
-                                          HDMI_QUANTIZATION_RANGE_FULL,
-                                          vc4_encoder->rgb_range_selectable,
-                                          false);
+                                          HDMI_QUANTIZATION_RANGE_FULL);
 
        frame.avi.right_bar = cstate->tv.margins.right;
        frame.avi.left_bar = cstate->tv.margins.left;
index 0490edb192a193d893d6c99166a9af26aa3584fa..91b8c72ff361f09653ce3dd8455db10f8ebbde2b 100644 (file)
@@ -17,9 +17,9 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_plane_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
 #include "vc4_drv.h"
 #include "vc4_regs.h"
 
index 2901ed0c52230f2a77b79850478b0cd652a39565..d098337c10e9394d843c192ab08595566f631c59 100644 (file)
@@ -675,20 +675,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
                uint32_t param = fourcc_mod_broadcom_param(fb->modifier);
                u32 tile_w, tile, x_off, pix_per_tile;
 
-               /* Column-based NV12 or RGBA.
-                */
-               if (fb->format->num_planes > 1) {
-                       if (hvs_format != HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE) {
-                               DRM_DEBUG_KMS("SAND format only valid for NV12/21");
-                               return -EINVAL;
-                       }
-                       hvs_format = HVS_PIXEL_FORMAT_H264;
-               } else {
-                       if (base_format_mod == DRM_FORMAT_MOD_BROADCOM_SAND256) {
-                               DRM_DEBUG_KMS("SAND256 format only valid for H.264");
-                               return -EINVAL;
-                       }
-               }
+               hvs_format = HVS_PIXEL_FORMAT_H264;
 
                switch (base_format_mod) {
                case DRM_FORMAT_MOD_BROADCOM_SAND64:
@@ -1151,8 +1138,6 @@ static bool vc4_format_mod_supported(struct drm_plane *plane,
                switch (fourcc_mod_broadcom_mod(modifier)) {
                case DRM_FORMAT_MOD_LINEAR:
                case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
-               case DRM_FORMAT_MOD_BROADCOM_SAND64:
-               case DRM_FORMAT_MOD_BROADCOM_SAND128:
                        return true;
                default:
                        return false;
index 6e23c50168f9ab2bb6574d44b3790e501959ce50..aa279b5b0de78cb4eb7dab5e6ba5dd892b59abaa 100644 (file)
@@ -9,9 +9,9 @@
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drm_writeback.h>
 #include <linux/clk.h>
 #include <linux/component.h>
index 8e7facb6514efdc7e0d90eadb7240e867cff4ba4..858c3a483229a23db2bc8eb23eb4d1de4be705a5 100644 (file)
@@ -25,9 +25,9 @@
  */
 
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
 #include <linux/clk.h>
 #include <linux/component.h>
 #include <linux/of_graph.h>
index 345bda4494e19e5ec6c65af3a1c60bb3d6ab9649..8bf3a7c23ed369e76f06b9e8a46969651ba447b1 100644 (file)
@@ -177,12 +177,14 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
        switch (vsg->state) {
        case dr_via_device_mapped:
                via_unmap_blit_from_device(pdev, vsg);
+               /* fall through */
        case dr_via_desc_pages_alloc:
                for (i = 0; i < vsg->num_desc_pages; ++i) {
                        if (vsg->desc_pages[i] != NULL)
                                free_page((unsigned long)vsg->desc_pages[i]);
                }
                kfree(vsg->desc_pages);
+               /* fall through */
        case dr_via_pages_locked:
                for (i = 0; i < vsg->num_pages; ++i) {
                        if (NULL != (page = vsg->pages[i])) {
@@ -191,8 +193,10 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
                                put_page(page);
                        }
                }
+               /* fall through */
        case dr_via_pages_alloc:
                vfree(vsg->pages);
+               /* fall through */
        default:
                vsg->state = dr_via_sg_init;
        }
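
The /* fall through */ annotations document that each teardown state deliberately cascades into the cleanup for the states below it (and they satisfy -Wimplicit-fallthrough). The idiom, reduced to a sketch with hypothetical helpers:

	switch (state) {
	case MAPPED:
		unmap_from_device();
		/* fall through */
	case ALLOCATED:
		free_pages_and_descriptors();
		/* fall through */
	default:
		state = INIT;
	}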
index aaf766f7cca25a96a05f211cceea71c24e1699b8..af6a12d3c05829b49efa9f8248b0d48c1fd209af 100644 (file)
@@ -70,8 +70,7 @@ static const struct file_operations via_driver_fops = {
 
 static struct drm_driver driver = {
        .driver_features =
-           DRIVER_USE_AGP | DRIVER_HAVE_IRQ | DRIVER_LEGACY |
-           DRIVER_IRQ_SHARED,
+           DRIVER_USE_AGP | DRIVER_HAVE_IRQ | DRIVER_LEGACY,
        .load = via_driver_load,
        .unload = via_driver_unload,
        .open = via_driver_open,
index f29deec83d1f0478550b2d5b400e918a99607b55..4e90cc8fa651c596052aeedb310d58ee2bb119ba 100644 (file)
@@ -3,7 +3,7 @@
 # Makefile for the drm device driver.  This driver provides support for the
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
-virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_drm_bus.o virtgpu_gem.o \
+virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_gem.o \
        virtgpu_fb.o virtgpu_display.o virtgpu_vq.o virtgpu_ttm.o \
        virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o virtgpu_plane.o \
        virtgpu_ioctl.o virtgpu_prime.o
index e1c223e18d8684e0f1f7a75d526a54dbf8159f11..653ec7d0bf4d85ccb57780cefe9c14a050861432 100644 (file)
@@ -26,9 +26,9 @@
  */
 
 #include "virtgpu_drv.h"
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #define XRES_MIN    32
 #define YRES_MIN    32
@@ -243,12 +243,8 @@ static enum drm_connector_status virtio_gpu_conn_detect(
 
 static void virtio_gpu_conn_destroy(struct drm_connector *connector)
 {
-       struct virtio_gpu_output *virtio_gpu_output =
-               drm_connector_to_virtio_gpu_output(connector);
-
        drm_connector_unregister(connector);
        drm_connector_cleanup(connector);
-       kfree(virtio_gpu_output);
 }
 
 static const struct drm_connector_funcs virtio_gpu_connector_funcs = {
@@ -362,7 +358,7 @@ static const struct drm_mode_config_funcs virtio_gpu_mode_funcs = {
        .atomic_commit = drm_atomic_helper_commit,
 };
 
-int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
+void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
 {
        int i;
 
@@ -381,7 +377,6 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
                vgdev_output_init(vgdev, i);
 
        drm_mode_config_reset(vgdev->ddev);
-       return 0;
 }
 
 void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev)
diff --git a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
deleted file mode 100644 (file)
index 0887e0b..0000000
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Copyright (C) 2015 Red Hat, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/pci.h>
-#include <drm/drm_fb_helper.h>
-
-#include "virtgpu_drv.h"
-
-int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev)
-{
-       struct drm_device *dev;
-       int ret;
-
-       dev = drm_dev_alloc(driver, &vdev->dev);
-       if (IS_ERR(dev))
-               return PTR_ERR(dev);
-       vdev->priv = dev;
-
-       if (strcmp(vdev->dev.parent->bus->name, "pci") == 0) {
-               struct pci_dev *pdev = to_pci_dev(vdev->dev.parent);
-               const char *pname = dev_name(&pdev->dev);
-               bool vga = (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
-               char unique[20];
-
-               DRM_INFO("pci: %s detected at %s\n",
-                        vga ? "virtio-vga" : "virtio-gpu-pci",
-                        pname);
-               dev->pdev = pdev;
-               if (vga)
-                       drm_fb_helper_remove_conflicting_pci_framebuffers(pdev,
-                                                                         0,
-                                                                         "virtiodrmfb");
-
-               /*
-                * Normally the drm_dev_set_unique() call is done by core DRM.
-                * The following comment covers, why virtio cannot rely on it.
-                *
-                * Unlike the other virtual GPU drivers, virtio abstracts the
-                * underlying bus type by using struct virtio_device.
-                *
-                * Hence the dev_is_pci() check, used in core DRM, will fail
-                * and the unique returned will be the virtio_device "virtio0",
-                * while a "pci:..." one is required.
-                *
-                * A few other ideas were considered:
-                * - Extend the dev_is_pci() check [in drm_set_busid] to
-                *   consider virtio.
-                *   Seems like a bigger hack than what we have already.
-                *
-                * - Point drm_device::dev to the parent of the virtio_device
-                *   Semantic changes:
-                *   * Using the wrong device for i2c, framebuffer_alloc and
-                *     prime import.
-                *   Visual changes:
-                *   * Helpers such as DRM_DEV_ERROR, dev_info, drm_printer,
-                *     will print the wrong information.
-                *
-                * We could address the latter issues, by introducing
-                * drm_device::bus_dev, ... which would be used solely for this.
-                *
-                * So for the moment keep things as-is, with a bulky comment
-                * for the next person who feels like removing this
-                * drm_dev_set_unique() quirk.
-                */
-               snprintf(unique, sizeof(unique), "pci:%s", pname);
-               ret = drm_dev_set_unique(dev, unique);
-               if (ret)
-                       goto err_free;
-
-       }
-
-       ret = drm_dev_register(dev, 0);
-       if (ret)
-               goto err_free;
-
-       return 0;
-
-err_free:
-       drm_dev_put(dev);
-       return ret;
-}
index 7df50920c1e04e11637575bf2c9a587d1da794dd..af92964b6889dd0dbfaadac5558cb27ee78b3d56 100644 (file)
@@ -40,8 +40,60 @@ static int virtio_gpu_modeset = -1;
 MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
 module_param_named(modeset, virtio_gpu_modeset, int, 0400);
 
+static int virtio_gpu_pci_quirk(struct drm_device *dev, struct virtio_device *vdev)
+{
+       struct pci_dev *pdev = to_pci_dev(vdev->dev.parent);
+       const char *pname = dev_name(&pdev->dev);
+       bool vga = (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
+       char unique[20];
+
+       DRM_INFO("pci: %s detected at %s\n",
+                vga ? "virtio-vga" : "virtio-gpu-pci",
+                pname);
+       dev->pdev = pdev;
+       if (vga)
+               drm_fb_helper_remove_conflicting_pci_framebuffers(pdev,
+                                                                 0,
+                                                                 "virtiodrmfb");
+
+       /*
+        * Normally the drm_dev_set_unique() call is done by core DRM.
+        * The following comment covers why virtio cannot rely on it.
+        *
+        * Unlike the other virtual GPU drivers, virtio abstracts the
+        * underlying bus type by using struct virtio_device.
+        *
+        * Hence the dev_is_pci() check, used in core DRM, will fail
+        * and the unique returned will be the virtio_device "virtio0",
+        * while a "pci:..." one is required.
+        *
+        * A few other ideas were considered:
+        * - Extend the dev_is_pci() check [in drm_set_busid] to
+        *   consider virtio.
+        *   Seems like a bigger hack than what we have already.
+        *
+        * - Point drm_device::dev to the parent of the virtio_device
+        *   Semantic changes:
+        *   * Using the wrong device for i2c, framebuffer_alloc and
+        *     prime import.
+        *   Visual changes:
+        *   * Helpers such as DRM_DEV_ERROR, dev_info, drm_printer,
+        *     will print the wrong information.
+        *
+        * We could address the latter issues by introducing
+        * drm_device::bus_dev, ... which would be used solely for this.
+        *
+        * So for the moment keep things as-is, with a bulky comment
+        * for the next person who feels like removing this
+        * drm_dev_set_unique() quirk.
+        */
+       snprintf(unique, sizeof(unique), "pci:%s", pname);
+       return drm_dev_set_unique(dev, unique);
+}
+
 static int virtio_gpu_probe(struct virtio_device *vdev)
 {
+       struct drm_device *dev;
        int ret;
 
        if (vgacon_text_force() && virtio_gpu_modeset == -1)
@@ -50,18 +102,39 @@ static int virtio_gpu_probe(struct virtio_device *vdev)
        if (virtio_gpu_modeset == 0)
                return -EINVAL;
 
-       ret = drm_virtio_init(&driver, vdev);
+       dev = drm_dev_alloc(&driver, &vdev->dev);
+       if (IS_ERR(dev))
+               return PTR_ERR(dev);
+       vdev->priv = dev;
+
+       if (!strcmp(vdev->dev.parent->bus->name, "pci")) {
+               ret = virtio_gpu_pci_quirk(dev, vdev);
+               if (ret)
+                       goto err_free;
+       }
+
+       ret = virtio_gpu_init(dev);
        if (ret)
-               return ret;
+               goto err_free;
+
+       ret = drm_dev_register(dev, 0);
+       if (ret)
+               goto err_free;
 
        drm_fbdev_generic_setup(vdev->priv, 32);
        return 0;
+
+err_free:
+       drm_dev_put(dev);
+       return ret;
 }
 
 static void virtio_gpu_remove(struct virtio_device *vdev)
 {
        struct drm_device *dev = vdev->priv;
 
+       drm_dev_unregister(dev);
+       virtio_gpu_deinit(dev);
        drm_put_dev(dev);
 }
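
The probe/remove rework makes the device lifecycle explicitly symmetric now that drm_virtio_init() is gone. A sketch of the assumed pairing (not normative):

	/*
	 * probe:   drm_dev_alloc()      -> virtio_gpu_init()   -> drm_dev_register()
	 * remove:  drm_dev_unregister() -> virtio_gpu_deinit() -> drm_put_dev()
	 */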
 
@@ -123,8 +196,6 @@ static const struct file_operations virtio_gpu_driver_fops = {
 
 static struct drm_driver driver = {
        .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
-       .load = virtio_gpu_driver_load,
-       .unload = virtio_gpu_driver_unload,
        .open = virtio_gpu_driver_open,
        .postclose = virtio_gpu_driver_postclose,
 
index 63704915f8ce4e81730dcf8bb043a0de51e2735e..d577cb76f5ad6b66d26124284159c82706f44699 100644 (file)
@@ -34,9 +34,9 @@
 #include <drm/drmP.h>
 #include <drm/drm_gem.h>
 #include <drm/drm_atomic.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
@@ -50,9 +50,6 @@
 #define DRIVER_MINOR 1
 #define DRIVER_PATCHLEVEL 0
 
-/* virtgpu_drm_bus.c */
-int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev);
-
 struct virtio_gpu_object {
        struct drm_gem_object gem_base;
        uint32_t hw_res_handle;
@@ -209,8 +206,8 @@ struct virtio_gpu_fpriv {
 extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
 
 /* virtio_kms.c */
-int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags);
-void virtio_gpu_driver_unload(struct drm_device *dev);
+int virtio_gpu_init(struct drm_device *dev);
+void virtio_gpu_deinit(struct drm_device *dev);
 int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file);
 void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file);
 
@@ -320,7 +317,7 @@ int virtio_gpu_framebuffer_init(struct drm_device *dev,
                                struct virtio_gpu_framebuffer *vgfb,
                                const struct drm_mode_fb_cmd2 *mode_cmd,
                                struct drm_gem_object *obj);
-int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
+void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
 void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
 
 /* virtio_gpu_plane.c */
@@ -337,7 +334,6 @@ int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma);
 /* virtio_gpu_fence.c */
 struct virtio_gpu_fence *virtio_gpu_fence_alloc(
        struct virtio_gpu_device *vgdev);
-void virtio_gpu_fence_cleanup(struct virtio_gpu_fence *fence);
 int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
                          struct virtio_gpu_ctrl_hdr *cmd_hdr,
                          struct virtio_gpu_fence *fence);
index 4d6826b27814ee3002eb119216c2cc569ea880c2..21bd4c4a32d141b3efef38cb5ec040857abd0fa9 100644 (file)
@@ -81,14 +81,6 @@ struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev)
        return fence;
 }
 
-void virtio_gpu_fence_cleanup(struct virtio_gpu_fence *fence)
-{
-       if (!fence)
-               return;
-
-       dma_fence_put(&fence->f);
-}
-
 int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
                          struct virtio_gpu_ctrl_hdr *cmd_hdr,
                          struct virtio_gpu_fence *fence)
index 161b80fee492564346262d954a0bb8d9864aaae4..14ce8188c05237e3cadbd4def670817e81ddd4c2 100644 (file)
@@ -351,7 +351,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
                virtio_gpu_cmd_resource_create_3d(vgdev, qobj, &rc_3d);
                ret = virtio_gpu_object_attach(vgdev, qobj, fence);
                if (ret) {
-                       virtio_gpu_fence_cleanup(fence);
+                       dma_fence_put(&fence->f);
                        goto fail_backoff;
                }
                ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
index 1072064a0db29e485d2f05da07500a5b40d2392e..84b6a6bf00c68222cfde74ec7f9d64c43b0ed9f3 100644 (file)
@@ -106,7 +106,7 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
        vgdev->num_capsets = num_capsets;
 }
 
-int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
+int virtio_gpu_init(struct drm_device *dev)
 {
        static vq_callback_t *callbacks[] = {
                virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
@@ -193,9 +193,7 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
                     num_capsets, &num_capsets);
        DRM_INFO("number of cap sets: %d\n", num_capsets);
 
-       ret = virtio_gpu_modeset_init(vgdev);
-       if (ret)
-               goto err_modeset;
+       virtio_gpu_modeset_init(vgdev);
 
        virtio_device_ready(vgdev->vdev);
        vgdev->vqs_ready = true;
@@ -209,7 +207,6 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
                           5 * HZ);
        return 0;
 
-err_modeset:
 err_scanouts:
        virtio_gpu_ttm_fini(vgdev);
 err_ttm:
@@ -231,7 +228,7 @@ static void virtio_gpu_cleanup_cap_cache(struct virtio_gpu_device *vgdev)
        }
 }
 
-void virtio_gpu_driver_unload(struct drm_device *dev)
+void virtio_gpu_deinit(struct drm_device *dev)
 {
        struct virtio_gpu_device *vgdev = dev->dev_private;
 
@@ -239,6 +236,7 @@ void virtio_gpu_driver_unload(struct drm_device *dev)
        flush_work(&vgdev->ctrlq.dequeue_work);
        flush_work(&vgdev->cursorq.dequeue_work);
        flush_work(&vgdev->config_changed_work);
+       vgdev->vdev->config->reset(vgdev->vdev);
        vgdev->vdev->config->del_vqs(vgdev->vdev);
 
        virtio_gpu_modeset_fini(vgdev);
index ead5c53d4e2163f02f9b859ca79d600ad9ae00ac..024c2aa0c929eab8ccdb9ef43b879447704b97b3 100644 (file)
@@ -130,11 +130,12 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
                                   plane->state->src_h >> 16,
                                   plane->state->src_x >> 16,
                                   plane->state->src_y >> 16);
-       virtio_gpu_cmd_resource_flush(vgdev, handle,
-                                     plane->state->src_x >> 16,
-                                     plane->state->src_y >> 16,
-                                     plane->state->src_w >> 16,
-                                     plane->state->src_h >> 16);
+       if (handle)
+               virtio_gpu_cmd_resource_flush(vgdev, handle,
+                                             plane->state->src_x >> 16,
+                                             plane->state->src_y >> 16,
+                                             plane->state->src_w >> 16,
+                                             plane->state->src_h >> 16);
 }
 
 static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane,
@@ -168,8 +169,10 @@ static void virtio_gpu_cursor_cleanup_fb(struct drm_plane *plane,
                return;
 
        vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
-       if (vgfb->fence)
-               virtio_gpu_fence_cleanup(vgfb->fence);
+       if (vgfb->fence) {
+               dma_fence_put(&vgfb->fence->f);
+               vgfb->fence = NULL;
+       }
 }
 
 static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
index e27c4aedb8093e16a886aa74c1a20a85062ba1f4..6bc2008b0d0db1196c9b4d738265073a7ea4130b 100644 (file)
@@ -192,8 +192,16 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
 
        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
-               if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
-                       DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
+               if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
+                       if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
+                               struct virtio_gpu_ctrl_hdr *cmd;
+                               cmd = (struct virtio_gpu_ctrl_hdr *)entry->buf;
+                               DRM_ERROR("response 0x%x (command 0x%x)\n",
+                                         le32_to_cpu(resp->type),
+                                         le32_to_cpu(cmd->type));
+                       } else
+                               DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
+               }
                if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
                        u64 f = le64_to_cpu(resp->fence_id);
 
index 177bbcb38306363b05d4dea04bedb83e617dc258..d44bfc392491a7772bc3af02aa3bfe2727f116ca 100644 (file)
@@ -8,7 +8,7 @@
 
 #include "vkms_drv.h"
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 
 static void _vblank_handle(struct vkms_output *output)
 {
@@ -104,6 +104,7 @@ static void vkms_atomic_crtc_reset(struct drm_crtc *crtc)
        vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL);
        if (!vkms_state)
                return;
+       INIT_WORK(&vkms_state->crc_work, vkms_crc_work_handle);
 
        crtc->state = &vkms_state->base;
        crtc->state->crtc = crtc;
index 2a16b86196dcd17a557114936bdb51562d2e3c40..b13f99a5c84966406e3a6fbc87339dbb4ecffb7a 100644 (file)
 
 #include <linux/module.h>
 #include <drm/drm_gem.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_probe_helper.h>
 #include "vkms_drv.h"
 
 #define DRIVER_NAME    "vkms"
index 271a0eb9042c3ee7ba01a3c73329f883dbb13bd3..c5b16efed51ab1f9de90792892ba4d38c0f0e89f 100644 (file)
@@ -7,8 +7,8 @@
  */
 
 #include "vkms_drv.h"
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
 
 static void vkms_connector_destroy(struct drm_connector *connector)
 {
index 25afb1d594e326c10e435e19a8f4346db2561459..4638f6791cdaebb874e63e75ad0e89a88f3b5771 100644 (file)
@@ -1582,7 +1582,7 @@ static const struct file_operations vmwgfx_driver_fops = {
 };
 
 static struct drm_driver driver = {
-       .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
+       .driver_features =
        DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
        .load = vmw_driver_load,
        .unload = vmw_driver_unload,
index 655abbcd4058272da076f8039bf1e942a13d78c8..535b03599e55893b52c23058db36c94992ddfc87 100644 (file)
@@ -29,8 +29,8 @@
 #define VMWGFX_KMS_H_
 
 #include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_encoder.h>
+#include <drm/drm_probe_helper.h>
 #include "vmwgfx_drv.h"
 
 /**
index 4d3d36fc3a5dd865e199a9239900b75333855c44..3e78a832d7f9fb12d85deb5fe08ab8d641819490 100644 (file)
@@ -10,7 +10,7 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drm_gem.h>
 
 #include <linux/of_device.h>
index 54af2669b1b37696545a0dda0e4533d47cecf528..9f5f31f77f1e2ed4d1896536dc8e60c751c9186a 100644 (file)
@@ -9,7 +9,7 @@
  */
 
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include <video/videomode.h>
 
index 28bc501af450c84c55c65f7d7cac971424353c35..d303a2e17f5e35579cb8333ebe3cff800575afc5 100644 (file)
@@ -11,9 +11,9 @@
 #include "xen_drm_front_gem.h"
 
 #include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem.h>
+#include <drm/drm_probe_helper.h>
 
 #include <linux/dma-buf.h>
 #include <linux/scatterlist.h>
index a3479eb72d79446c098cf7ebf7d3fb792a669f7e..860da055c6bb2727b71caefeba2e4b7391955448 100644 (file)
@@ -13,9 +13,9 @@
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_gem.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "xen_drm_front.h"
 #include "xen_drm_front_conn.h"
index f5ea32ae8600b70beb46ce5fdafe3d1a316a6832..28e8d607291001f86ad8ca5866f15b2a3697eb4b 100644 (file)
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drmP.h>
 
 #include "zx_drm_drv.h"
index 78655269d84343d683fc2278740ff21e075c3d1d..df522d74bebfe8edef1ccfeeabd98be74bdc111d 100644 (file)
@@ -20,9 +20,9 @@
 #include <linux/of_device.h>
 
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drmP.h>
 
 #include <sound/hdmi-codec.h>
@@ -125,7 +125,9 @@ static int zx_hdmi_config_video_avi(struct zx_hdmi *hdmi,
        union hdmi_infoframe frame;
        int ret;
 
-       ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode, false);
+       ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+                                                      &hdmi->connector,
+                                                      mode);
        if (ret) {
                DRM_DEV_ERROR(hdmi->dev, "failed to get avi infoframe: %d\n",
                              ret);
index b73afb212fb246f8e5e7c9b5e1def38f8d1df8c8..87b5d86413d250e89c9fac6040941896153c3cb8 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/regmap.h>
 
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drmP.h>
 
 #include "zx_drm_drv.h"
index 23d1ff4355a0304c9a1923d0e87a774b9f128414..e14c1d7097409b18c3d8c6aad69d3dc14a372e9b 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/regmap.h>
 
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drmP.h>
 
 #include "zx_drm_drv.h"
index 442311d31110161d14532983d0fa335ca51a2dac..15400ffb1d225570c0ebf48a97b4603ebf61fef2 100644 (file)
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_of.h>
 #include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drmP.h>
 
 #include "zx_common_regs.h"
index b4c385d4a6af2ec2568d1f5226aa587da1c220db..103fffc1904bbea71d0bf5535a7938bc41b0ec53 100644 (file)
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/debugfs.h>
 #include <linux/host1x.h>
 #include <linux/of.h>
+#include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/of_device.h>
 
@@ -500,6 +502,36 @@ static void host1x_detach_driver(struct host1x *host1x,
        mutex_unlock(&host1x->devices_lock);
 }
 
+static int host1x_devices_show(struct seq_file *s, void *data)
+{
+       struct host1x *host1x = s->private;
+       struct host1x_device *device;
+
+       mutex_lock(&host1x->devices_lock);
+
+       list_for_each_entry(device, &host1x->devices, list) {
+               struct host1x_subdev *subdev;
+
+               seq_printf(s, "%s\n", dev_name(&device->dev));
+
+               mutex_lock(&device->subdevs_lock);
+
+               list_for_each_entry(subdev, &device->active, list)
+                       seq_printf(s, "  %pOFf: %s\n", subdev->np,
+                                  dev_name(subdev->client->dev));
+
+               list_for_each_entry(subdev, &device->subdevs, list)
+                       seq_printf(s, "  %pOFf:\n", subdev->np);
+
+               mutex_unlock(&device->subdevs_lock);
+       }
+
+       mutex_unlock(&host1x->devices_lock);
+
+       return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(host1x_devices);
+
 /**
  * host1x_register() - register a host1x controller
  * @host1x: host1x controller
@@ -523,6 +555,9 @@ int host1x_register(struct host1x *host1x)
 
        mutex_unlock(&drivers_lock);
 
+       debugfs_create_file("devices", S_IRUGO, host1x->debugfs, host1x,
+                           &host1x_devices_fops);
+
        return 0;
 }
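
Note: host1x_devices_show() above prints one line per logical host1x device, then its bound subdevices (with the driver-side device name) followed by the still-unbound ones. Judging purely from the seq_printf() format strings, reading the new debugfs file would produce output shaped roughly like this (directory, device, and node names are illustrative only):

# cat /sys/kernel/debug/tegra-host1x/devices
drm
  /host1x@50000000/dc@54200000: 54200000.dc
  /host1x@50000000/vic@54340000: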
 
index 91df51e631b25c0b028b56089ae3f6029298a608..f45b7c69b69484ad3374fed9cbb8b56bf0b3fa68 100644 (file)
  * means that the push buffer is full, not empty.
  */
 
-#define HOST1X_PUSHBUFFER_SLOTS        512
+/*
+ * Typically the commands written into the push buffer are a pair of words. We
+ * use slots to represent each of these pairs and to simplify things. Note the
+ * strange number of slots allocated here. 512 slots will fit exactly within a
+ * single memory page. We also need one additional word at the end of the push
+ * buffer for the RESTART opcode that will instruct the CDMA to jump back to
+ * the beginning of the push buffer. With 512 slots, this means that we'll use
+ * 2 memory pages and waste 4092 bytes of the second page that will never be
+ * used.
+ */
+#define HOST1X_PUSHBUFFER_SLOTS        511
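
Note: the arithmetic behind the comment, spelled out (a slot is two 32-bit words, i.e. 8 bytes):

512 slots * 8 bytes           = 4096 bytes  (exactly one 4 KiB page)
512 slots * 8 + 4 for RESTART = 4100 bytes  (spills into a second page, wasting 4092 bytes)
511 slots * 8 + 4 for RESTART = 4092 bytes  (fits within a single page)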
 
 /*
  * Clean up push buffer resources
@@ -143,7 +153,10 @@ static void host1x_pushbuffer_push(struct push_buffer *pb, u32 op1, u32 op2)
        WARN_ON(pb->pos == pb->fence);
        *(p++) = op1;
        *(p++) = op2;
-       pb->pos = (pb->pos + 8) & (pb->size - 1);
+       pb->pos += 8;
+
+       if (pb->pos >= pb->size)
+               pb->pos -= pb->size;
 }
 
 /*
@@ -153,7 +166,10 @@ static void host1x_pushbuffer_push(struct push_buffer *pb, u32 op1, u32 op2)
 static void host1x_pushbuffer_pop(struct push_buffer *pb, unsigned int slots)
 {
        /* Advance the next write position */
-       pb->fence = (pb->fence + slots * 8) & (pb->size - 1);
+       pb->fence += slots * 8;
+
+       if (pb->fence >= pb->size)
+               pb->fence -= pb->size;
 }
 
 /*
@@ -161,7 +177,12 @@ static void host1x_pushbuffer_pop(struct push_buffer *pb, unsigned int slots)
  */
 static u32 host1x_pushbuffer_space(struct push_buffer *pb)
 {
-       return ((pb->fence - pb->pos) & (pb->size - 1)) / 8;
+       unsigned int fence = pb->fence;
+
+       if (pb->fence < pb->pos)
+               fence += pb->size;
+
+       return (fence - pb->pos) / 8;
 }
 
 /*
@@ -210,13 +231,52 @@ unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
                cdma->event = event;
 
                mutex_unlock(&cdma->lock);
-               down(&cdma->sem);
+               wait_for_completion(&cdma->complete);
                mutex_lock(&cdma->lock);
        }
 
        return 0;
 }
 
+/*
+ * Sleep (if necessary) until the push buffer has enough free space.
+ *
+ * Must be called with the cdma lock held.
+ */
+int host1x_cdma_wait_pushbuffer_space(struct host1x *host1x,
+                                     struct host1x_cdma *cdma,
+                                     unsigned int needed)
+{
+       while (true) {
+               struct push_buffer *pb = &cdma->push_buffer;
+               unsigned int space;
+
+               space = host1x_pushbuffer_space(pb);
+               if (space >= needed)
+                       break;
+
+               trace_host1x_wait_cdma(dev_name(cdma_to_channel(cdma)->dev),
+                                      CDMA_EVENT_PUSH_BUFFER_SPACE);
+
+               host1x_hw_cdma_flush(host1x, cdma);
+
+               /* If somebody has managed to already start waiting, yield */
+               if (cdma->event != CDMA_EVENT_NONE) {
+                       mutex_unlock(&cdma->lock);
+                       schedule();
+                       mutex_lock(&cdma->lock);
+                       continue;
+               }
+
+               cdma->event = CDMA_EVENT_PUSH_BUFFER_SPACE;
+
+               mutex_unlock(&cdma->lock);
+               wait_for_completion(&cdma->complete);
+               mutex_lock(&cdma->lock);
+       }
+
+       return 0;
+}
+
 /*
  * Start timer that tracks the time spent by the job.
  * Must be called with the cdma lock held.
@@ -314,7 +374,7 @@ static void update_cdma_locked(struct host1x_cdma *cdma)
 
        if (signal) {
                cdma->event = CDMA_EVENT_NONE;
-               up(&cdma->sem);
+               complete(&cdma->complete);
        }
 }
 
@@ -323,7 +383,7 @@ void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma,
 {
        struct host1x *host1x = cdma_to_host1x(cdma);
        u32 restart_addr, syncpt_incrs, syncpt_val;
-       struct host1x_job *job = NULL;
+       struct host1x_job *job, *next_job = NULL;
 
        syncpt_val = host1x_syncpt_load(cdma->timeout.syncpt);
 
@@ -341,40 +401,37 @@ void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma,
                __func__);
 
        list_for_each_entry(job, &cdma->sync_queue, list) {
-               if (syncpt_val < job->syncpt_end)
-                       break;
+               if (syncpt_val < job->syncpt_end) {
+
+                       if (!list_is_last(&job->list, &cdma->sync_queue))
+                               next_job = list_next_entry(job, list);
+
+                       goto syncpt_incr;
+               }
 
                host1x_job_dump(dev, job);
        }
 
+       /* all jobs have been completed */
+       job = NULL;
+
+syncpt_incr:
+
        /*
-        * Walk the sync_queue, first incrementing with the CPU syncpts that
-        * are partially executed (the first buffer) or fully skipped while
-        * still in the current context (slots are also NOP-ed).
+        * Use the CPU to increment the remaining syncpts of a partially
+        * executed job.
         *
-        * At the point contexts are interleaved, syncpt increments must be
-        * done inline with the pushbuffer from a GATHER buffer to maintain
-        * the order (slots are modified to be a GATHER of syncpt incrs).
-        *
-        * Note: save in restart_addr the location where the timed out buffer
-        * started in the PB, so we can start the refetch from there (with the
-        * modified NOP-ed PB slots). This lets things appear to have completed
-        * properly for this buffer and resources are freed.
+        * CDMA will then continue execution starting with the next job, or go
+        * idle if there is none.
         */
-
-       dev_dbg(dev, "%s: perform CPU incr on pending same ctx buffers\n",
-               __func__);
-
-       if (!list_empty(&cdma->sync_queue))
-               restart_addr = job->first_get;
+       if (next_job)
+               restart_addr = next_job->first_get;
        else
                restart_addr = cdma->last_pos;
 
-       /* do CPU increments as long as this context continues */
-       list_for_each_entry_from(job, &cdma->sync_queue, list) {
-               /* different context, gets us out of this loop */
-               if (job->client != cdma->timeout.client)
-                       break;
+       /* do CPU increments for the remaining syncpts */
+       if (job) {
+               dev_dbg(dev, "%s: perform CPU incr on pending buffers\n",
+                       __func__);
 
                /* won't need a timeout when replayed */
                job->timeout = 0;
@@ -389,21 +446,10 @@ void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma,
                                                syncpt_incrs, job->syncpt_end,
                                                job->num_slots);
 
-               syncpt_val += syncpt_incrs;
+               dev_dbg(dev, "%s: finished sync_queue modification\n",
+                       __func__);
        }
 
-       /*
-        * The following sumbits from the same client may be dependent on the
-        * failed submit and therefore they may fail. Force a small timeout
-        * to make the queue cleanup faster.
-        */
-
-       list_for_each_entry_from(job, &cdma->sync_queue, list)
-               if (job->client == cdma->timeout.client)
-                       job->timeout = min_t(unsigned int, job->timeout, 500);
-
-       dev_dbg(dev, "%s: finished sync_queue modification\n", __func__);
-
        /* roll back DMAGET and start up channel again */
        host1x_hw_cdma_resume(host1x, cdma, restart_addr);
 }
@@ -416,7 +462,7 @@ int host1x_cdma_init(struct host1x_cdma *cdma)
        int err;
 
        mutex_init(&cdma->lock);
-       sema_init(&cdma->sem, 0);
+       init_completion(&cdma->complete);
 
        INIT_LIST_HEAD(&cdma->sync_queue);
 
@@ -509,6 +555,59 @@ void host1x_cdma_push(struct host1x_cdma *cdma, u32 op1, u32 op2)
        host1x_pushbuffer_push(pb, op1, op2);
 }
 
+/*
+ * Push four words into two consecutive push buffer slots. Note that extra
+ * care needs to be taken not to split the two slots across the end of the
+ * push buffer. Otherwise the RESTART opcode at the end of the push buffer
+ * that ensures processing will restart at the beginning will break up the
+ * four words.
+ *
+ * Blocks as necessary if the push buffer is full.
+ */
+void host1x_cdma_push_wide(struct host1x_cdma *cdma, u32 op1, u32 op2,
+                          u32 op3, u32 op4)
+{
+       struct host1x_channel *channel = cdma_to_channel(cdma);
+       struct host1x *host1x = cdma_to_host1x(cdma);
+       struct push_buffer *pb = &cdma->push_buffer;
+       unsigned int needed = 2, extra = 0, i;
+       unsigned int space = cdma->slots_free;
+
+       if (host1x_debug_trace_cmdbuf)
+               trace_host1x_cdma_push_wide(dev_name(channel->dev), op1, op2,
+                                           op3, op4);
+
+       /* compute number of extra slots needed for padding */
+       if (pb->pos + 16 > pb->size) {
+               extra = (pb->size - pb->pos) / 8;
+               needed += extra;
+       }
+
+       host1x_cdma_wait_pushbuffer_space(host1x, cdma, needed);
+       space = host1x_pushbuffer_space(pb);
+
+       cdma->slots_free = space - needed;
+       cdma->slots_used += needed;
+
+       /*
+        * Note that we rely on the fact that this is only used to submit wide
+        * gather opcodes, which consist of 3 words, and they are padded with
+        * a NOP to avoid having to deal with fractional slots (a slot always
+        * represents 2 words). The fourth opcode passed to this function will
+        * therefore always be a NOP.
+        *
+        * This works around a slight ambiguity when it comes to opcodes. For
+        * all current host1x incarnations the NOP opcode uses the exact same
+        * encoding (0x20000000), so we could hard-code the value here, but a
+        * new incarnation may change it and break that assumption.
+        */
+       for (i = 0; i < extra; i++)
+               host1x_pushbuffer_push(pb, op4, op4);
+
+       host1x_pushbuffer_push(pb, op1, op2);
+       host1x_pushbuffer_push(pb, op3, op4);
+}
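
Note: a worked example of the padding computation above, using the 511-slot (4088-byte) push buffer introduced earlier in this patch and an assumed write position near the end:

pb->size = 4088, pb->pos = 4080
  pb->pos + 16 = 4096 > 4088        -> the four words would straddle the end
  extra  = (4088 - 4080) / 8 = 1    -> one padding slot remains before the end
  needed = 2 + 1 = 3                -> wait until 3 slots are free
  one (op4, op4) NOP slot is pushed, pos wraps from 4088 to 0, and
  (op1, op2), (op3, op4) then land contiguously in slots 0 and 1.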
+
 /*
  * End a cdma submit
  * Kick off DMA, add job to the sync queue, and a number of slots to be freed
index e97e17b82370c5307283a1de59ad139af23e895f..3a5e0408b8d17ed29edde00e3a9479c222a1a493 100644 (file)
@@ -20,7 +20,7 @@
 #define __HOST1X_CDMA_H
 
 #include <linux/sched.h>
-#include <linux/semaphore.h>
+#include <linux/completion.h>
 #include <linux/list.h>
 
 struct host1x_syncpt;
@@ -69,8 +69,8 @@ enum cdma_event {
 
 struct host1x_cdma {
        struct mutex lock;              /* controls access to shared state */
-       struct semaphore sem;           /* signalled when event occurs */
-       enum cdma_event event;          /* event that sem is waiting for */
+       struct completion complete;     /* signalled when event occurs */
+       enum cdma_event event;          /* event that complete is waiting for */
        unsigned int slots_used;        /* pb slots used in current submit */
        unsigned int slots_free;        /* pb slots free in current submit */
        unsigned int first_get;         /* DMAGET value, where submit begins */
@@ -90,6 +90,8 @@ int host1x_cdma_init(struct host1x_cdma *cdma);
 int host1x_cdma_deinit(struct host1x_cdma *cdma);
 int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job);
 void host1x_cdma_push(struct host1x_cdma *cdma, u32 op1, u32 op2);
+void host1x_cdma_push_wide(struct host1x_cdma *cdma, u32 op1, u32 op2,
+                          u32 op3, u32 op4);
 void host1x_cdma_end(struct host1x_cdma *cdma, struct host1x_job *job);
 void host1x_cdma_update(struct host1x_cdma *cdma);
 void host1x_cdma_peek(struct host1x_cdma *cdma, u32 dmaget, int slot,
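
Note: the semaphore-to-completion conversion visible throughout this file matches the actual usage pattern: a single waiter parks on a one-shot event and is woken exactly once, so a counting semaphore's up/down semantics were never needed. A minimal sketch of the pairing, with hypothetical names:

/* Sketch only; cf. host1x_cdma_init(), host1x_cdma_wait_locked() and
 * update_cdma_locked() in the hunks above. */
#include <linux/completion.h>

struct example_cdma {
	struct completion complete;	/* signalled when the event occurs */
};

static void example_init(struct example_cdma *cdma)
{
	init_completion(&cdma->complete);
}

static void example_wait(struct example_cdma *cdma)
{
	wait_for_completion(&cdma->complete);
}

static void example_signal(struct example_cdma *cdma)
{
	complete(&cdma->complete);
}
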
index 419d8929a98f8f7ca2abf13d00ab3b8f3373f6b7..ee3c7b81a29d85c42080e32e3d62462fc96b76a1 100644 (file)
@@ -120,6 +120,15 @@ static const struct host1x_info host1x05_info = {
        .dma_mask = DMA_BIT_MASK(34),
 };
 
+static const struct host1x_sid_entry tegra186_sid_table[] = {
+       {
+               /* VIC */
+               .base = 0x1af0,
+               .offset = 0x30,
+               .limit = 0x34
+       },
+};
+
 static const struct host1x_info host1x06_info = {
        .nb_channels = 63,
        .nb_pts = 576,
@@ -127,8 +136,19 @@ static const struct host1x_info host1x06_info = {
        .nb_bases = 16,
        .init = host1x06_init,
        .sync_offset = 0x0,
-       .dma_mask = DMA_BIT_MASK(34),
+       .dma_mask = DMA_BIT_MASK(40),
        .has_hypervisor = true,
+       .num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
+       .sid_table = tegra186_sid_table,
+};
+
+static const struct host1x_sid_entry tegra194_sid_table[] = {
+       {
+               /* VIC */
+               .base = 0x1af0,
+               .offset = 0x30,
+               .limit = 0x34
+       },
 };
 
 static const struct host1x_info host1x07_info = {
@@ -140,6 +160,8 @@ static const struct host1x_info host1x07_info = {
        .sync_offset = 0x0,
        .dma_mask = DMA_BIT_MASK(40),
        .has_hypervisor = true,
+       .num_sid_entries = ARRAY_SIZE(tegra194_sid_table),
+       .sid_table = tegra194_sid_table,
 };
 
 static const struct of_device_id host1x_of_match[] = {
@@ -154,6 +176,19 @@ static const struct of_device_id host1x_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, host1x_of_match);
 
+static void host1x_setup_sid_table(struct host1x *host)
+{
+       const struct host1x_info *info = host->info;
+       unsigned int i;
+
+       for (i = 0; i < info->num_sid_entries; i++) {
+               const struct host1x_sid_entry *entry = &info->sid_table[i];
+
+               host1x_hypervisor_writel(host, entry->offset, entry->base);
+               host1x_hypervisor_writel(host, entry->limit, entry->base + 4);
+       }
+}
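
Note: host1x_setup_sid_table() appears to program, per client, the offset and limit of that client's stream ID registers: the value entry->offset goes into the hypervisor register at entry->base, and entry->limit into entry->base + 4. For the VIC entry in tegra186_sid_table above this amounts to:

host1x_hypervisor_writel(host, 0x30, 0x1af0);     /* VIC stream ID offset */
host1x_hypervisor_writel(host, 0x34, 0x1af0 + 4); /* VIC stream ID limit  */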
+
 static int host1x_probe(struct platform_device *pdev)
 {
        struct host1x *host;
@@ -248,6 +283,8 @@ static int host1x_probe(struct platform_device *pdev)
        host->group = iommu_group_get(&pdev->dev);
        if (host->group) {
                struct iommu_domain_geometry *geometry;
+               u64 mask = dma_get_mask(host->dev);
+               dma_addr_t start, end;
                unsigned long order;
 
                err = iova_cache_get();
@@ -275,11 +312,12 @@ static int host1x_probe(struct platform_device *pdev)
                }
 
                geometry = &host->domain->geometry;
+               start = geometry->aperture_start & mask;
+               end = geometry->aperture_end & mask;
 
                order = __ffs(host->domain->pgsize_bitmap);
-               init_iova_domain(&host->iova, 1UL << order,
-                                geometry->aperture_start >> order);
-               host->iova_end = geometry->aperture_end;
+               init_iova_domain(&host->iova, 1UL << order, start >> order);
+               host->iova_end = end;
        }
 
 skip_iommu:
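
Note: with host1x06's DMA mask widened from 34 to 40 bits in this patch, the IOMMU aperture may still exceed what the device can address, so both aperture ends are clamped to the mask before seeding the IOVA domain. Illustrative values (assumed, not taken from real hardware):

mask         = DMA_BIT_MASK(40)    = 0x000000ffffffffff
aperture_end = 0x0000ffffffffffff    (a 48-bit IOMMU)
end          = aperture_end & mask = 0x000000ffffffffff
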
@@ -316,6 +354,9 @@ skip_iommu:
 
        host1x_debug_init(host);
 
+       if (host->info->has_hypervisor)
+               host1x_setup_sid_table(host);
+
        err = host1x_register(host);
        if (err < 0)
                goto fail_deinit_intr;
index 36f44ffebe739a98d8195f611bdafa2b07318676..05216a7e4830936021677d01a6cee6903db6324c 100644 (file)
@@ -94,6 +94,12 @@ struct host1x_intr_ops {
        int (*free_syncpt_irq)(struct host1x *host);
 };
 
+struct host1x_sid_entry {
+       unsigned int base;
+       unsigned int offset;
+       unsigned int limit;
+};
+
 struct host1x_info {
        unsigned int nb_channels; /* host1x: number of channels supported */
        unsigned int nb_pts; /* host1x: number of syncpoints supported */
@@ -103,6 +109,8 @@ struct host1x_info {
        unsigned int sync_offset; /* offset of syncpoint registers */
        u64 dma_mask; /* mask of addressable memory */
        bool has_hypervisor; /* has hypervisor registers */
+       unsigned int num_sid_entries;
+       const struct host1x_sid_entry *sid_table;
 };
 
 struct host1x {
index ce320534cbed39aa3fc06aa1473a9187ed671e62..5d61088db2bb9047b8edcc16c08662f71ce9c859 100644 (file)
@@ -39,8 +39,6 @@ static void push_buffer_init(struct push_buffer *pb)
 static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
                                u32 syncpt_incrs, u32 syncval, u32 nr_slots)
 {
-       struct host1x *host1x = cdma_to_host1x(cdma);
-       struct push_buffer *pb = &cdma->push_buffer;
        unsigned int i;
 
        for (i = 0; i < syncpt_incrs; i++)
@@ -48,18 +46,6 @@ static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
 
        /* after CPU incr, ensure shadow is up to date */
        host1x_syncpt_load(cdma->timeout.syncpt);
-
-       /* NOP all the PB slots */
-       while (nr_slots--) {
-               u32 *p = (u32 *)(pb->mapped + getptr);
-               *(p++) = HOST1X_OPCODE_NOP;
-               *(p++) = HOST1X_OPCODE_NOP;
-               dev_dbg(host1x->dev, "%s: NOP at %pad+%#x\n", __func__,
-                       &pb->dma, getptr);
-               getptr = (getptr + 8) & (pb->size - 1);
-       }
-
-       wmb();
 }
 
 /*
@@ -68,20 +54,31 @@ static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
 static void cdma_start(struct host1x_cdma *cdma)
 {
        struct host1x_channel *ch = cdma_to_channel(cdma);
+       u64 start, end;
 
        if (cdma->running)
                return;
 
        cdma->last_pos = cdma->push_buffer.pos;
+       start = cdma->push_buffer.dma;
+       end = cdma->push_buffer.size + 4;
 
        host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
                         HOST1X_CHANNEL_DMACTRL);
 
        /* set base, put and end pointer */
-       host1x_ch_writel(ch, cdma->push_buffer.dma, HOST1X_CHANNEL_DMASTART);
+       host1x_ch_writel(ch, lower_32_bits(start), HOST1X_CHANNEL_DMASTART);
+#if HOST1X_HW >= 6
+       host1x_ch_writel(ch, upper_32_bits(start), HOST1X_CHANNEL_DMASTART_HI);
+#endif
        host1x_ch_writel(ch, cdma->push_buffer.pos, HOST1X_CHANNEL_DMAPUT);
-       host1x_ch_writel(ch, cdma->push_buffer.dma + cdma->push_buffer.size + 4,
-                        HOST1X_CHANNEL_DMAEND);
+#if HOST1X_HW >= 6
+       host1x_ch_writel(ch, 0, HOST1X_CHANNEL_DMAPUT_HI);
+#endif
+       host1x_ch_writel(ch, lower_32_bits(end), HOST1X_CHANNEL_DMAEND);
+#if HOST1X_HW >= 6
+       host1x_ch_writel(ch, upper_32_bits(end), HOST1X_CHANNEL_DMAEND_HI);
+#endif
 
        /* reset GET */
        host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP |
@@ -104,6 +101,7 @@ static void cdma_timeout_restart(struct host1x_cdma *cdma, u32 getptr)
 {
        struct host1x *host1x = cdma_to_host1x(cdma);
        struct host1x_channel *ch = cdma_to_channel(cdma);
+       u64 start, end;
 
        if (cdma->running)
                return;
@@ -113,10 +111,18 @@ static void cdma_timeout_restart(struct host1x_cdma *cdma, u32 getptr)
        host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
                         HOST1X_CHANNEL_DMACTRL);
 
+       start = cdma->push_buffer.dma;
+       end = cdma->push_buffer.size + 4;
+
        /* set base, end pointer (all of memory) */
-       host1x_ch_writel(ch, cdma->push_buffer.dma, HOST1X_CHANNEL_DMASTART);
-       host1x_ch_writel(ch, cdma->push_buffer.dma + cdma->push_buffer.size,
-                        HOST1X_CHANNEL_DMAEND);
+       host1x_ch_writel(ch, lower_32_bits(start), HOST1X_CHANNEL_DMASTART);
+#if HOST1X_HW >= 6
+       host1x_ch_writel(ch, upper_32_bits(start), HOST1X_CHANNEL_DMASTART_HI);
+#endif
+       host1x_ch_writel(ch, lower_32_bits(end), HOST1X_CHANNEL_DMAEND);
+#if HOST1X_HW >= 6
+       host1x_ch_writel(ch, upper_32_bits(end), HOST1X_CHANNEL_DMAEND_HI);
+#endif
 
        /* set GET, by loading the value in PUT (then reset GET) */
        host1x_ch_writel(ch, getptr, HOST1X_CHANNEL_DMAPUT);
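
Note: on host1x06 and later the DMASTART/DMAEND registers gain _HI halves, so the 64-bit values are split with the standard helpers. For an assumed 40-bit start address:

u64 start = 0x1234567000ULL;
lower_32_bits(start) == 0x34567000	/* -> HOST1X_CHANNEL_DMASTART    */
upper_32_bits(start) == 0x12		/* -> HOST1X_CHANNEL_DMASTART_HI */
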
index 95ea81172a83460d95139428d64f980f460c3263..27101c04a8272668988ce5be66dfc584068f8a60 100644 (file)
@@ -17,6 +17,7 @@
  */
 
 #include <linux/host1x.h>
+#include <linux/iommu.h>
 #include <linux/slab.h>
 
 #include <trace/events/host1x.h>
@@ -60,15 +61,37 @@ static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo,
 static void submit_gathers(struct host1x_job *job)
 {
        struct host1x_cdma *cdma = &job->channel->cdma;
+#if HOST1X_HW < 6
+       struct device *dev = job->channel->dev;
+#endif
        unsigned int i;
 
        for (i = 0; i < job->num_gathers; i++) {
                struct host1x_job_gather *g = &job->gathers[i];
-               u32 op1 = host1x_opcode_gather(g->words);
-               u32 op2 = g->base + g->offset;
+               dma_addr_t addr = g->base + g->offset;
+               u32 op2, op3;
+
+               op2 = lower_32_bits(addr);
+               op3 = upper_32_bits(addr);
+
+               trace_write_gather(cdma, g->bo, g->offset, g->words);
+
+               if (op3 != 0) {
+#if HOST1X_HW >= 6
+                       u32 op1 = host1x_opcode_gather_wide(g->words);
+                       u32 op4 = HOST1X_OPCODE_NOP;
+
+                       host1x_cdma_push_wide(cdma, op1, op2, op3, op4);
+#else
+                       dev_err(dev, "invalid gather for push buffer %pad\n",
+                               &addr);
+                       continue;
+#endif
+               } else {
+                       u32 op1 = host1x_opcode_gather(g->words);
 
-               trace_write_gather(cdma, g->bo, g->offset, op1 & 0xffff);
-               host1x_cdma_push(cdma, op1, op2);
+                       host1x_cdma_push(cdma, op1, op2);
+               }
        }
 }
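
Note: submit_gathers() now selects the wide variant whenever the gather's IOVA has non-zero upper bits. With host1x_opcode_gather_wide() defined later in this diff as (12 << 28) | count, a 16-word gather at the assumed address 0x1234567000 would be pushed as:

op1 = (12 << 28) | 16     = 0xc0000010	/* wide gather, 16 words */
op2 = lower_32_bits(addr) = 0x34567000
op3 = upper_32_bits(addr) = 0x12
op4 = HOST1X_OPCODE_NOP			/* pads the second slot  */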
 
@@ -89,6 +112,16 @@ static inline void synchronize_syncpt_base(struct host1x_job *job)
                         HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(value));
 }
 
+static void host1x_channel_set_streamid(struct host1x_channel *channel)
+{
+#if HOST1X_HW >= 6
+       struct iommu_fwspec *spec = dev_iommu_fwspec_get(channel->dev->parent);
+       u32 sid = spec ? spec->ids[0] & 0xffff : 0x7f;
+
+       host1x_ch_writel(channel, sid, HOST1X_CHANNEL_SMMU_STREAMID);
+#endif
+}
+
 static int channel_submit(struct host1x_job *job)
 {
        struct host1x_channel *ch = job->channel;
@@ -120,6 +153,8 @@ static int channel_submit(struct host1x_job *job)
                goto error;
        }
 
+       host1x_channel_set_streamid(ch);
+
        /* begin a CDMA submit */
        err = host1x_cdma_begin(&ch->cdma, job);
        if (err) {
index 3039c92ea605ca8b2b864f63e6f39da1f70aef64..dd37b10c8d043391b5ab49dd0dfe977fb6ce62c7 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/types.h>
 #include <linux/bitops.h>
 
+#include "hw_host1x06_channel.h"
 #include "hw_host1x06_uclass.h"
 #include "hw_host1x06_vm.h"
 #include "hw_host1x06_hypervisor.h"
@@ -137,6 +138,11 @@ static inline u32 host1x_opcode_gather_incr(unsigned offset, unsigned count)
        return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
 }
 
+static inline u32 host1x_opcode_gather_wide(unsigned count)
+{
+       return (12 << 28) | count;
+}
+
 #define HOST1X_OPCODE_NOP host1x_opcode_nonincr(0, 0)
 
 #endif
index 1353e7ab71dd49686327d44cb89e7b524c34d2a3..9f6da4ee54435087b2031a43a62c9d3bb965222b 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/types.h>
 #include <linux/bitops.h>
 
+#include "hw_host1x07_channel.h"
 #include "hw_host1x07_uclass.h"
 #include "hw_host1x07_vm.h"
 #include "hw_host1x07_hypervisor.h"
@@ -137,6 +138,11 @@ static inline u32 host1x_opcode_gather_incr(unsigned offset, unsigned count)
        return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
 }
 
+static inline u32 host1x_opcode_gather_wide(unsigned count)
+{
+       return (12 << 28) | count;
+}
+
 #define HOST1X_OPCODE_NOP host1x_opcode_nonincr(0, 0)
 
 #endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x06_channel.h b/drivers/gpu/host1x/hw/hw_host1x06_channel.h
new file mode 100644 (file)
index 0000000..18ae1c5
--- /dev/null
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 NVIDIA Corporation.
+ */
+
+#ifndef HOST1X_HW_HOST1X06_CHANNEL_H
+#define HOST1X_HW_HOST1X06_CHANNEL_H
+
+#define HOST1X_CHANNEL_SMMU_STREAMID 0x084
+
+#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x07_channel.h b/drivers/gpu/host1x/hw/hw_host1x07_channel.h
new file mode 100644 (file)
index 0000000..96fa72b
--- /dev/null
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 NVIDIA Corporation.
+ */
+
+#ifndef HOST1X_HW_HOST1X07_CHANNEL_H
+#define HOST1X_HW_HOST1X07_CHANNEL_H
+
+#define HOST1X_CHANNEL_SMMU_STREAMID 0x084
+
+#endif
index cc6532d8c2fa3c998422f744feddfb577281b20b..b0d73d5fba5dd95ee91c19497869337e64f1ae41 100644 (file)
@@ -221,8 +221,7 @@ static void vbox_master_drop(struct drm_device *dev, struct drm_file *file_priv)
 
 static struct drm_driver driver = {
        .driver_features =
-           DRIVER_MODESET | DRIVER_GEM | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
-           DRIVER_PRIME | DRIVER_ATOMIC,
+           DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
        .dev_priv_size = 0,
 
        .lastclose = drm_fb_helper_lastclose,
index 6b7aa23dfc0aaf99eefc54979fa2f94a49f65ee6..397496cf0bdf605d017fb9ff8015910dba7f6790 100644 (file)
@@ -95,11 +95,6 @@ int vboxfb_create(struct drm_fb_helper *helper,
 
        strcpy(info->fix.id, "vboxdrmfb");
 
-       /*
-        * The last flag forces a mode set on VT switches even if the kernel
-        * does not think it is needed.
-        */
-       info->flags = FBINFO_DEFAULT | FBINFO_MISC_ALWAYS_SETPAR;
        info->fbops = &vboxfb_ops;
 
        /*
index f3d9895c79d8aea3d4f0066521454a8a681d4120..195484713365dc50d95e89e21c2e680c616ae629 100644 (file)
@@ -9,7 +9,9 @@
  *          Hans de Goede <hdegoede@redhat.com>
  */
 
-#include <drm/drm_crtc_helper.h>
+#include <linux/pci.h>
+#include <drm/drm_irq.h>
+#include <drm/drm_probe_helper.h>
 
 #include "vbox_drv.h"
 #include "vboxvideo.h"
index c43bec4628aec565d7084cce8fa5dc7625f3d8a3..1aaff02c07ff8e4c4f14bb598a04615dd6174772 100644 (file)
@@ -11,9 +11,9 @@
  */
 #include <linux/export.h>
 #include <drm/drm_atomic.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_plane_helper.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "vbox_drv.h"
 #include "vboxvideo.h"
index 9c56412bb2cf2e0e3e64bb06306f17bfb76a362f..66e70770cce5dda55e673200a6f1779aa4124098 100644 (file)
 #ifndef __DW_HDMI__
 #define __DW_HDMI__
 
-#include <drm/drmP.h>
-
+struct drm_connector;
+struct drm_display_mode;
+struct drm_encoder;
 struct dw_hdmi;
+struct platform_device;
 
 /**
  * DOC: Supported input formats and encodings
@@ -157,6 +159,7 @@ void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense);
 void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate);
 void dw_hdmi_audio_enable(struct dw_hdmi *hdmi);
 void dw_hdmi_audio_disable(struct dw_hdmi *hdmi);
+void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi);
 
 /* PHY configuration */
 void dw_hdmi_phy_i2c_set_addr(struct dw_hdmi *hdmi, u8 address);
index 48a671e782cafd6ec3ef405f615802c187733ff5..7d3dd69a5caa73a6b23ee11f74c5d4a98f640028 100644 (file)
@@ -14,7 +14,8 @@ struct dw_mipi_dsi;
 
 struct dw_mipi_dsi_phy_ops {
        int (*init)(void *priv_data);
-       int (*get_lane_mbps)(void *priv_data, struct drm_display_mode *mode,
+       int (*get_lane_mbps)(void *priv_data,
+                            const struct drm_display_mode *mode,
                             unsigned long mode_flags, u32 lanes, u32 format,
                             unsigned int *lane_mbps);
 };
index db94ef00940e20b1b6bb2f6e0ea3441c2a9bc506..3f5c577c9dbde93d7f8ba8bff046434eb1087880 100644 (file)
@@ -94,23 +94,11 @@ struct dma_buf_attachment;
 struct pci_dev;
 struct pci_controller;
 
-#define DRM_SWITCH_POWER_ON 0
-#define DRM_SWITCH_POWER_OFF 1
-#define DRM_SWITCH_POWER_CHANGING 2
-#define DRM_SWITCH_POWER_DYNAMIC_OFF 3
-
-/* returns true if currently okay to sleep */
-static inline bool drm_can_sleep(void)
-{
-       if (in_atomic() || in_dbg_master() || irqs_disabled())
-               return false;
-       return true;
-}
-
-#if defined(CONFIG_DRM_DEBUG_SELFTEST_MODULE)
-#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x) EXPORT_SYMBOL(x)
-#else
-#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x)
-#endif
+/*
+ * NOTE: drmP.h is obsolete - do NOT add anything to this file
+ *
+ * Do not include drmP.h in new files.
+ * Work is ongoing to remove drmP.h includes from existing files
+ */
 
 #endif
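
Note: in practice, replacing a blanket drmP.h include means pulling in only the specific headers a file uses, as the conversions elsewhere in this diff illustrate; a typical (file-dependent) set might be:

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_probe_helper.h>
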
index cac4a1b6b0e8633b4f3ab21de492abea372308c6..811b4a92568f463c16d3c5d9fbca9cda9efd049e 100644 (file)
@@ -139,9 +139,9 @@ struct drm_crtc_commit {
        /**
         * @abort_completion:
         *
-        * A flag that's set after drm_atomic_helper_setup_commit takes a second
-        * reference for the completion of $drm_crtc_state.event. It's used by
-        * the free code to remove the second reference if commit fails.
+        * A flag that's set after drm_atomic_helper_setup_commit() takes a
+        * second reference for the completion of $drm_crtc_state.event. It's
+        * used by the free code to remove the second reference if commit fails.
         */
        bool abort_completion;
 };
index bd850747ce5472fe477919db42f4cb5d1911f61f..9da8c93f79764ab5c246afa612a7eb748962153a 100644 (file)
@@ -196,8 +196,8 @@ struct drm_bridge_funcs {
         * the DRM framework will have to be extended with DRM bridge states.
         */
        void (*mode_set)(struct drm_bridge *bridge,
-                        struct drm_display_mode *mode,
-                        struct drm_display_mode *adjusted_mode);
+                        const struct drm_display_mode *mode,
+                        const struct drm_display_mode *adjusted_mode);
        /**
         * @pre_enable:
         *
@@ -310,8 +310,8 @@ enum drm_mode_status drm_bridge_mode_valid(struct drm_bridge *bridge,
 void drm_bridge_disable(struct drm_bridge *bridge);
 void drm_bridge_post_disable(struct drm_bridge *bridge);
 void drm_bridge_mode_set(struct drm_bridge *bridge,
-                        struct drm_display_mode *mode,
-                        struct drm_display_mode *adjusted_mode);
+                        const struct drm_display_mode *mode,
+                        const struct drm_display_mode *adjusted_mode);
 void drm_bridge_pre_enable(struct drm_bridge *bridge);
 void drm_bridge_enable(struct drm_bridge *bridge);
 
index 90ef9996d9a4ab2896f34b1e87ad72e6cc44749c..d1c662d92ab7118b661a42cd78fe392a78d33f0c 100644 (file)
@@ -69,4 +69,32 @@ int drm_plane_create_color_properties(struct drm_plane *plane,
                                      u32 supported_ranges,
                                      enum drm_color_encoding default_encoding,
                                      enum drm_color_range default_range);
+
+/**
+ * enum drm_color_lut_tests - hw-specific LUT tests to perform
+ *
+ * The drm_color_lut_check() function takes a bitmask of the values here to
+ * determine which tests to apply to a userspace-provided LUT.
+ */
+enum drm_color_lut_tests {
+       /**
+        * @DRM_COLOR_LUT_EQUAL_CHANNELS:
+        *
+        * Checks whether the entries of a LUT all have equal values for the
+        * red, green, and blue channels.  Intended for hardware that only
+        * accepts a single value per LUT entry and assumes that value applies
+        * to all three color components.
+        */
+       DRM_COLOR_LUT_EQUAL_CHANNELS = BIT(0),
+
+       /**
+        * @DRM_COLOR_LUT_NON_DECREASING:
+        *
+        * Checks whether the entries of a LUT are always flat or increasing
+        * (never decreasing).
+        */
+       DRM_COLOR_LUT_NON_DECREASING = BIT(1),
+};
+
+int drm_color_lut_check(const struct drm_property_blob *lut, u32 tests);
 #endif
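
Note: a hedged sketch of how a driver might use the new helper from its CRTC atomic check; the surrounding driver and function names are hypothetical:

/* Sketch only: validate a userspace gamma LUT against hardware constraints. */
static int example_crtc_atomic_check(struct drm_crtc *crtc,
				     struct drm_crtc_state *state)
{
	u32 tests = DRM_COLOR_LUT_EQUAL_CHANNELS | DRM_COLOR_LUT_NON_DECREASING;

	if (state->gamma_lut)
		return drm_color_lut_check(state->gamma_lut, tests);

	return 0;
}
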
index f82701d49ea680556ea4f00fedcf570928e295ef..994161374a4928e33e668674f8120589d6661bc1 100644 (file)
@@ -365,6 +365,12 @@ struct drm_display_info {
         */
        bool has_hdmi_infoframe;
 
+       /**
+        * @rgb_quant_range_selectable: Does the sink support selecting
+        * the RGB quantization range?
+        */
+       bool rgb_quant_range_selectable;
+
        /**
         * @edid_hdmi_dc_modes: Mask of supported hdmi deep color modes. Even
         * more stuff redundant with @bus_formats.
index 39c3900aab3cea107a0771b6fe0d5dc510ac4c11..85abd3fe9e832d4bad14a710e69504821e4bcd3d 100644 (file)
@@ -1149,9 +1149,6 @@ static inline uint32_t drm_crtc_mask(const struct drm_crtc *crtc)
        return 1 << drm_crtc_index(crtc);
 }
 
-int drm_crtc_force_disable(struct drm_crtc *crtc);
-int drm_crtc_force_disable_all(struct drm_device *dev);
-
 int drm_mode_set_config_internal(struct drm_mode_set *set);
 struct drm_crtc *drm_crtc_from_index(struct drm_device *dev, int idx);
 
index d65f034843cea620a3d99ff7b28261e8e33122a9..a6d520d5b6ca787dd787cb765a33211fed7dcb53 100644 (file)
@@ -56,21 +56,6 @@ bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
 int drm_helper_connector_dpms(struct drm_connector *connector, int mode);
 
 void drm_helper_resume_force_mode(struct drm_device *dev);
-
-/* drm_probe_helper.c */
-int drm_helper_probe_single_connector_modes(struct drm_connector
-                                           *connector, uint32_t maxX,
-                                           uint32_t maxY);
-int drm_helper_probe_detect(struct drm_connector *connector,
-                           struct drm_modeset_acquire_ctx *ctx,
-                           bool force);
-void drm_kms_helper_poll_init(struct drm_device *dev);
-void drm_kms_helper_poll_fini(struct drm_device *dev);
-bool drm_helper_hpd_irq_event(struct drm_device *dev);
-void drm_kms_helper_hotplug_event(struct drm_device *dev);
-
-void drm_kms_helper_poll_disable(struct drm_device *dev);
-void drm_kms_helper_poll_enable(struct drm_device *dev);
-bool drm_kms_helper_is_poll_worker(void);
+int drm_helper_force_disable_all(struct drm_device *dev);
 
 #endif
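
Note: the probe and output-polling declarations deleted here are not dropped: matching the include churn across the drivers in this diff, they move to the new <drm/drm_probe_helper.h>, so former users of drm_crtc_helper.h that only needed these helpers now do:

#include <drm/drm_probe_helper.h>
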
index 4487660b26b8d198ba25f64728f88e09adccaf97..40c34a5bf1498223760b99146dd99159697abb11 100644 (file)
@@ -78,6 +78,9 @@ drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter,
 bool
 drm_atomic_helper_damage_iter_next(struct drm_atomic_helper_damage_iter *iter,
                                   struct drm_rect *rect);
+bool drm_atomic_helper_damage_merged(const struct drm_plane_state *old_state,
+                                    struct drm_plane_state *state,
+                                    struct drm_rect *rect);
 
 /**
  * drm_helper_get_plane_damage_clips - Returns damage clips in &drm_rect.
index 42411b3ea0c82e4133b4f5fc97dfe1150c99f63b..d5e092dccf3e5578c90356e0bab6f929116dac74 100644 (file)
@@ -24,25 +24,79 @@ struct inode;
 struct pci_dev;
 struct pci_controller;
 
+
 /**
- * DRM device structure. This structure represent a complete card that
+ * enum switch_power_state - power state of drm device
+ */
+
+enum switch_power_state {
+       /** @DRM_SWITCH_POWER_ON: Power state is ON */
+       DRM_SWITCH_POWER_ON = 0,
+
+       /** @DRM_SWITCH_POWER_OFF: Power state is OFF */
+       DRM_SWITCH_POWER_OFF = 1,
+
+       /** @DRM_SWITCH_POWER_CHANGING: Power state is changing */
+       DRM_SWITCH_POWER_CHANGING = 2,
+
+       /** @DRM_SWITCH_POWER_DYNAMIC_OFF: Suspended */
+       DRM_SWITCH_POWER_DYNAMIC_OFF = 3,
+};
+
+/**
+ * struct drm_device - DRM device structure
+ *
+ * This structure represent a complete card that
  * may contain multiple heads.
  */
 struct drm_device {
-       struct list_head legacy_dev_list;/**< list of devices per driver for stealth attach cleanup */
-       int if_version;                 /**< Highest interface version set */
-
-       /** \name Lifetime Management */
-       /*@{ */
-       struct kref ref;                /**< Object ref-count */
-       struct device *dev;             /**< Device structure of bus-device */
-       struct drm_driver *driver;      /**< DRM driver managing the device */
-       void *dev_private;              /**< DRM driver private data */
-       struct drm_minor *primary;              /**< Primary node */
-       struct drm_minor *render;               /**< Render node */
+       /**
+        * @legacy_dev_list:
+        *
+        * List of devices per driver for stealth attach cleanup
+        */
+       struct list_head legacy_dev_list;
+
+       /** @if_version: Highest interface version set */
+       int if_version;
+
+       /** @ref: Object ref-count */
+       struct kref ref;
+
+       /** @dev: Device structure of bus-device */
+       struct device *dev;
+
+       /** @driver: DRM driver managing the device */
+       struct drm_driver *driver;
+
+       /**
+        * @dev_private:
+        *
+        * DRM driver private data. Instead of using this pointer it is
+        * recommended that drivers use drm_dev_init() and embed struct
+        * &drm_device in their larger per-device structure.
+        */
+       void *dev_private;
+
+       /** @primary: Primary node */
+       struct drm_minor *primary;
+
+       /** @render: Render node */
+       struct drm_minor *render;
+
+       /**
+        * @registered:
+        *
+        * Internally used by drm_dev_register() and drm_connector_register().
+        */
        bool registered;
 
-       /* currently active master for this device. Protected by master_mutex */
+       /**
+        * @master:
+        *
+        * Currently active master for this device.
+        * Protected by &master_mutex
+        */
        struct drm_master *master;
 
        /**
@@ -63,76 +117,65 @@ struct drm_device {
         */
        bool unplugged;
 
-       struct inode *anon_inode;               /**< inode for private address-space */
-       char *unique;                           /**< unique name of the device */
-       /*@} */
+       /** @anon_inode: inode for private address-space */
+       struct inode *anon_inode;
+
+       /** @unique: Unique name of the device */
+       char *unique;
 
-       /** \name Locks */
-       /*@{ */
-       struct mutex struct_mutex;      /**< For others */
-       struct mutex master_mutex;      /**< For drm_minor::master and drm_file::is_master */
-       /*@} */
+       /**
+        * @struct_mutex:
+        *
+        * Lock for others (not &drm_minor.master and &drm_file.is_master)
+        */
+       struct mutex struct_mutex;
 
-       /** \name Usage Counters */
-       /*@{ */
-       int open_count;                 /**< Outstanding files open, protected by drm_global_mutex. */
-       spinlock_t buf_lock;            /**< For drm_device::buf_use and a few other things. */
-       int buf_use;                    /**< Buffers in use -- cannot alloc */
-       atomic_t buf_alloc;             /**< Buffer allocation in progress */
-       /*@} */
+       /**
+        * @master_mutex:
+        *
+        * Lock for &drm_minor.master and &drm_file.is_master
+        */
+       struct mutex master_mutex;
+
+       /**
+        * @open_count:
+        *
+        * Usage counter for outstanding files open,
+        * protected by drm_global_mutex
+        */
+       int open_count;
 
+       /** @filelist_mutex: Protects @filelist. */
        struct mutex filelist_mutex;
+       /**
+        * @filelist:
+        *
+        * List of userspace clients, linked through &drm_file.lhead.
+        */
        struct list_head filelist;
 
        /**
         * @filelist_internal:
         *
-        * List of open DRM files for in-kernel clients. Protected by @filelist_mutex.
+        * List of open DRM files for in-kernel clients.
+        * Protected by &filelist_mutex.
         */
        struct list_head filelist_internal;
 
        /**
         * @clientlist_mutex:
         *
-        * Protects @clientlist access.
+        * Protects &clientlist access.
         */
        struct mutex clientlist_mutex;
 
        /**
         * @clientlist:
         *
-        * List of in-kernel clients. Protected by @clientlist_mutex.
+        * List of in-kernel clients. Protected by &clientlist_mutex.
         */
        struct list_head clientlist;
 
-       /** \name Memory management */
-       /*@{ */
-       struct list_head maplist;       /**< Linked list of regions */
-       struct drm_open_hash map_hash;  /**< User token hash table for maps */
-
-       /** \name Context handle management */
-       /*@{ */
-       struct list_head ctxlist;       /**< Linked list of context handles */
-       struct mutex ctxlist_mutex;     /**< For ctxlist */
-
-       struct idr ctx_idr;
-
-       struct list_head vmalist;       /**< List of vmas (for debugging) */
-
-       /*@} */
-
-       /** \name DMA support */
-       /*@{ */
-       struct drm_device_dma *dma;             /**< Optional pointer for DMA support */
-       /*@} */
-
-       /** \name Context support */
-       /*@{ */
-
-       __volatile__ long context_flag; /**< Context swapping flag */
-       int last_context;               /**< Last current context */
-       /*@} */
-
        /**
         * @irq_enabled:
         *
@@ -141,6 +184,10 @@ struct drm_device {
         * to true manually.
         */
        bool irq_enabled;
+
+       /**
+        * @irq: Used by the drm_irq_install() and drm_irq_uninstall() helpers.
+        */
        int irq;
 
        /**
@@ -168,7 +215,16 @@ struct drm_device {
         */
        struct drm_vblank_crtc *vblank;
 
-       spinlock_t vblank_time_lock;    /**< Protects vblank count and time updates during vblank enable/disable */
+       /**
+        * @vblank_time_lock:
+        *
+        *  Protects vblank count and time updates during vblank enable/disable
+        */
+       spinlock_t vblank_time_lock;
+       /**
+        * @vbl_lock: Top-level vblank references lock, wraps the low-level
+        * @vblank_time_lock.
+        */
        spinlock_t vbl_lock;
 
        /**
@@ -184,45 +240,61 @@ struct drm_device {
         * races and imprecision over longer time periods, hence exposing a
         * hardware vblank counter is always recommended.
         *
-        * If non-zeor, &drm_crtc_funcs.get_vblank_counter must be set.
+        * This is the statically configured device wide maximum. The driver
+        * can instead choose to use a runtime configurable per-crtc value
+        * &drm_vblank_crtc.max_vblank_count, in which case @max_vblank_count
+        * must be left at zero. See drm_crtc_set_max_vblank_count() on how
+        * to use the per-crtc value.
+        *
+        * If non-zero, &drm_crtc_funcs.get_vblank_counter must be set.
         */
-       u32 max_vblank_count;           /**< size of vblank counter register */
+       u32 max_vblank_count;
+
+       /** @vblank_event_list: List of vblank events */
+       struct list_head vblank_event_list;
 
        /**
-        * List of events
+        * @event_lock:
+        *
+        * Protects @vblank_event_list and event delivery in
+        * general. See drm_send_event() and drm_send_event_locked().
         */
-       struct list_head vblank_event_list;
        spinlock_t event_lock;
 
-       /*@} */
+       /** @agp: AGP data */
+       struct drm_agp_head *agp;
 
-       struct drm_agp_head *agp;       /**< AGP data */
+       /** @pdev: PCI device structure */
+       struct pci_dev *pdev;
 
-       struct pci_dev *pdev;           /**< PCI device structure */
 #ifdef __alpha__
+       /** @hose: PCI hose, only used on ALPHA platforms. */
        struct pci_controller *hose;
 #endif
+       /** @num_crtcs: Number of CRTCs on this device */
+       unsigned int num_crtcs;
 
-       struct drm_sg_mem *sg;  /**< Scatter gather memory */
-       unsigned int num_crtcs;                  /**< Number of CRTCs on this device */
+       /** @mode_config: Current mode config */
+       struct drm_mode_config mode_config;
 
-       struct {
-               int context;
-               struct drm_hw_lock *lock;
-       } sigdata;
-
-       struct drm_local_map *agp_buffer_map;
-       unsigned int agp_buffer_token;
-
-       struct drm_mode_config mode_config;     /**< Current mode config */
-
-       /** \name GEM information */
-       /*@{ */
+       /** @object_name_lock: GEM information */
        struct mutex object_name_lock;
+
+       /** @object_name_idr: GEM information */
        struct idr object_name_idr;
+
+       /** @vma_offset_manager: GEM information */
        struct drm_vma_offset_manager *vma_offset_manager;
-       /*@} */
-       int switch_power_state;
+
+       /**
+        * @switch_power_state:
+        *
+        * Power state of the client.
+        * Used by drivers supporting the switcheroo driver.
+        * The state is maintained in the
+        * &vga_switcheroo_client_ops.set_gpu_state callback
+        */
+       enum switch_power_state switch_power_state;
 
        /**
         * @fb_helper:
@@ -231,6 +303,56 @@ struct drm_device {
         * Set by drm_fb_helper_init() and cleared by drm_fb_helper_fini().
         */
        struct drm_fb_helper *fb_helper;
+
+       /* Everything below here is for legacy driver, never use! */
+       /* private: */
+
+       /* Context handle management - linked list of context handles */
+       struct list_head ctxlist;
+
+       /* Context handle management - mutex for &ctxlist */
+       struct mutex ctxlist_mutex;
+
+       /* Context handle management */
+       struct idr ctx_idr;
+
+       /* Memory management - linked list of regions */
+       struct list_head maplist;
+
+       /* Memory management - user token hash table for maps */
+       struct drm_open_hash map_hash;
+
+       /* Context handle management - list of vmas (for debugging) */
+       struct list_head vmalist;
+
+       /* Optional pointer for DMA support */
+       struct drm_device_dma *dma;
+
+       /* Context swapping flag */
+       __volatile__ long context_flag;
+
+       /* Last current context */
+       int last_context;
+
+       /* Lock for &buf_use and a few other things. */
+       spinlock_t buf_lock;
+
+       /* Usage counter for buffers in use -- cannot alloc */
+       int buf_use;
+
+       /* Buffer allocation in progress */
+       atomic_t buf_alloc;
+
+       struct {
+               int context;
+               struct drm_hw_lock *lock;
+       } sigdata;
+
+       struct drm_local_map *agp_buffer_map;
+       unsigned int agp_buffer_token;
+
+       /* Scatter gather memory */
+       struct drm_sg_mem *sg;
 };
 
 #endif
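
Note: the @dev_private kerneldoc added above recommends embedding &struct drm_device rather than using the private pointer; a minimal sketch of that pattern (driver names hypothetical):

/* Sketch: embed drm_device instead of relying on dev_private. */
struct foo_device {
	struct drm_device drm;
	/* driver-private state follows */
};

static inline struct foo_device *to_foo(struct drm_device *drm)
{
	return container_of(drm, struct foo_device, drm);
}
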
index 5736c942c85b7d9d96707e9160d98d5dbe116cb1..5db7fb8c8b50ae7b3dec6daf8ba7aec3248da140 100644 (file)
 # define DP_PSR_SETUP_TIME_SHIFT            1
 # define DP_PSR2_SU_Y_COORDINATE_REQUIRED   (1 << 4)  /* eDP 1.4a */
 # define DP_PSR2_SU_GRANULARITY_REQUIRED    (1 << 5)  /* eDP 1.4b */
+
+#define DP_PSR2_SU_X_GRANULARITY           0x072 /* eDP 1.4b */
+#define DP_PSR2_SU_Y_GRANULARITY           0x074 /* eDP 1.4b */
+
 /*
  * 0x80-0x8f describe downstream port capabilities, but there are two layouts
  * based on whether DP_DETAILED_CAP_INFO_AVAILABLE was set.  If it was not,
 #define DP_PEER_DEVICE_DP_LEGACY_CONV  0x4
 
 /* DP 1.2 MST sideband request names DP 1.2a Table 2-80 */
+#define DP_GET_MSG_TRANSACTION_VERSION 0x00 /* DP 1.3 */
 #define DP_LINK_ADDRESS                        0x01
 #define DP_CONNECTION_STATUS_NOTIFY    0x02
 #define DP_ENUM_PATH_RESOURCES         0x10
 #define DP_SINK_EVENT_NOTIFY           0x30
 #define DP_QUERY_STREAM_ENC_STATUS     0x38
 
+/* DP 1.2 MST sideband reply types */
+#define DP_SIDEBAND_REPLY_ACK          0x00
+#define DP_SIDEBAND_REPLY_NAK          0x01
+
 /* DP 1.2 MST sideband nak reasons - table 2.84 */
 #define DP_NAK_WRITE_FAILURE           0x01
 #define DP_NAK_INVALID_READ            0x02
@@ -1365,6 +1374,13 @@ enum drm_dp_quirk {
         * to 16 bits. So will give a constant value (0x8000) for compatability.
         */
        DP_DPCD_QUIRK_CONSTANT_N,
+       /**
+        * @DP_DPCD_QUIRK_NO_PSR:
+        *
+        * The device does not support PSR even though it reports that it
+        * does, or the driver still needs to implement proper handling for
+        * such devices.
+        */
+       DP_DPCD_QUIRK_NO_PSR,
 };
 
 /**
index 371cc281647761804fbcd30583c31b171b6520b1..451d020f0137b924218967990bf7e76a62141041 100644 (file)
@@ -44,7 +44,6 @@ struct drm_dp_vcpi {
 
 /**
  * struct drm_dp_mst_port - MST port
- * @kref: reference count for this port.
  * @port_num: port number
  * @input: if this port is an input port.
  * @mcs: message capability status - DP 1.2 spec.
@@ -67,7 +66,18 @@ struct drm_dp_vcpi {
  * in the MST topology.
  */
 struct drm_dp_mst_port {
-       struct kref kref;
+       /**
+        * @topology_kref: refcount for this port's lifetime in the topology,
+        * only the DP MST helpers should need to touch this
+        */
+       struct kref topology_kref;
+
+       /**
+        * @malloc_kref: refcount for the memory allocation containing this
+        * structure. See drm_dp_mst_get_port_malloc() and
+        * drm_dp_mst_put_port_malloc().
+        */
+       struct kref malloc_kref;
 
        u8 port_num;
        bool input;
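
Note: the kref split above separates "still present in the topology" (topology_kref) from "memory still referenced" (malloc_kref). The rule the new helpers imply: any long-lived driver pointer to a port takes a malloc reference. A hedged sketch, with hypothetical driver names:

struct example_connector {
	struct drm_dp_mst_port *port;
};

static void example_store_port(struct example_connector *conn,
			       struct drm_dp_mst_port *port)
{
	conn->port = port;
	drm_dp_mst_get_port_malloc(port); /* memory ref, not a topology ref */
}

static void example_drop_port(struct example_connector *conn)
{
	drm_dp_mst_put_port_malloc(conn->port);
	conn->port = NULL;
}
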
@@ -102,7 +112,6 @@ struct drm_dp_mst_port {
 
 /**
  * struct drm_dp_mst_branch - MST branch device.
- * @kref: reference count for this port.
  * @rad: Relative Address to talk to this branch device.
  * @lct: Link count total to talk to this branch device.
  * @num_ports: number of ports on the branch.
@@ -121,7 +130,19 @@ struct drm_dp_mst_port {
  * to downstream port of parent branches.
  */
 struct drm_dp_mst_branch {
-       struct kref kref;
+       /**
+        * @topology_kref: refcount for this branch device's lifetime in the
+        * topology, only the DP MST helpers should need to touch this
+        */
+       struct kref topology_kref;
+
+       /**
+        * @malloc_kref: refcount for the memory allocation containing this
+        * structure. See drm_dp_mst_get_mstb_malloc() and
+        * drm_dp_mst_put_mstb_malloc().
+        */
+       struct kref malloc_kref;
+
        u8 rad[8];
        u8 lct;
        int num_ports;
@@ -404,9 +425,15 @@ struct drm_dp_payload {
 
 #define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base)
 
+struct drm_dp_vcpi_allocation {
+       struct drm_dp_mst_port *port;
+       int vcpi;
+       struct list_head next;
+};
+
 struct drm_dp_mst_topology_state {
        struct drm_private_state base;
-       int avail_slots;
+       struct list_head vcpis;
        struct drm_dp_mst_topology_mgr *mgr;
 };
 
@@ -617,13 +644,115 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr);
 struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
                                                                    struct drm_dp_mst_topology_mgr *mgr);
-int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
-                                 struct drm_dp_mst_topology_mgr *mgr,
-                                 struct drm_dp_mst_port *port, int pbn);
-int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
-                                    struct drm_dp_mst_topology_mgr *mgr,
-                                    int slots);
+int __must_check
+drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
+                             struct drm_dp_mst_topology_mgr *mgr,
+                             struct drm_dp_mst_port *port, int pbn);
+int __must_check
+drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
+                                struct drm_dp_mst_topology_mgr *mgr,
+                                struct drm_dp_mst_port *port);
 int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
                                 struct drm_dp_mst_port *port, bool power_up);
+int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state);
+
+void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port);
+void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port);
+
+extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs;
+
+/**
+ * __drm_dp_mst_state_iter_get - private atomic state iterator function for
+ * macro-internal use
+ * @state: &struct drm_atomic_state pointer
+ * @mgr: pointer to the &struct drm_dp_mst_topology_mgr iteration cursor
+ * @old_state: optional pointer to the old &struct drm_dp_mst_topology_state
+ * iteration cursor
+ * @new_state: optional pointer to the new &struct drm_dp_mst_topology_state
+ * iteration cursor
+ * @i: int iteration cursor, for macro-internal use
+ *
+ * Used by for_each_oldnew_mst_mgr_in_state(),
+ * for_each_old_mst_mgr_in_state(), and for_each_new_mst_mgr_in_state(). Don't
+ * call this directly.
+ *
+ * Returns:
+ * True if the current &struct drm_private_obj is a &struct
+ * drm_dp_mst_topology_mgr, false otherwise.
+ */
+static inline bool
+__drm_dp_mst_state_iter_get(struct drm_atomic_state *state,
+                           struct drm_dp_mst_topology_mgr **mgr,
+                           struct drm_dp_mst_topology_state **old_state,
+                           struct drm_dp_mst_topology_state **new_state,
+                           int i)
+{
+       struct __drm_private_objs_state *objs_state = &state->private_objs[i];
+
+       if (objs_state->ptr->funcs != &drm_dp_mst_topology_state_funcs)
+               return false;
+
+       *mgr = to_dp_mst_topology_mgr(objs_state->ptr);
+       if (old_state)
+               *old_state = to_dp_mst_topology_state(objs_state->old_state);
+       if (new_state)
+               *new_state = to_dp_mst_topology_state(objs_state->new_state);
+
+       return true;
+}
+
+/**
+ * for_each_oldnew_mst_mgr_in_state - iterate over all DP MST topology
+ * managers in an atomic update
+ * @__state: &struct drm_atomic_state pointer
+ * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
+ * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
+ * state
+ * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
+ * state
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all DRM DP MST topology managers in an atomic update,
+ * tracking both old and new state. This is useful in places where the state
+ * delta needs to be considered, for example in atomic check functions.
+ */
+#define for_each_oldnew_mst_mgr_in_state(__state, mgr, old_state, new_state, __i) \
+       for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
+               for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), &(new_state), (__i)))
+
+/**
+ * for_each_old_mst_mgr_in_state - iterate over all DP MST topology managers
+ * in an atomic update
+ * @__state: &struct drm_atomic_state pointer
+ * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
+ * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
+ * state
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all DRM DP MST topology managers in an atomic update,
+ * tracking only the old state. This is useful in disable functions, where we
+ * need the old state the hardware is still in.
+ */
+#define for_each_old_mst_mgr_in_state(__state, mgr, old_state, __i) \
+       for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
+               for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), NULL, (__i)))
+
+/**
+ * for_each_new_mst_mgr_in_state - iterate over all DP MST topology managers
+ * in an atomic update
+ * @__state: &struct drm_atomic_state pointer
+ * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
+ * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
+ * state
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all DRM DP MST topology managers in an atomic update,
+ * tracking only the new state. This is useful in enable functions, where we
+ * need the new state the hardware should be in when the atomic commit
+ * operation has completed.
+ */
+#define for_each_new_mst_mgr_in_state(__state, mgr, new_state, __i) \
+       for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
+               for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), NULL, &(new_state), (__i)))
 
 #endif
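
A minimal sketch of how a driver's atomic check could use the new iterators
together with drm_dp_mst_atomic_check(); the function and variable names here
are illustrative, not part of this patch:

        static int example_mst_atomic_check(struct drm_device *dev,
                                            struct drm_atomic_state *state)
        {
                struct drm_dp_mst_topology_mgr *mgr;
                struct drm_dp_mst_topology_state *old_mst_state, *new_mst_state;
                int i;

                /* Walk every MST manager touched by this atomic update. */
                for_each_oldnew_mst_mgr_in_state(state, mgr, old_mst_state,
                                                 new_mst_state, i) {
                        /* Compare old vs. new VCPI allocations here. */
                }

                /* Validate the VCPI slot allocations across all managers. */
                return drm_dp_mst_atomic_check(state);
        }
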
index 35af23f5fa0d081a97eef6fa8e2e805b1e768669..ca46a45a9cce2ae7f4638ed5c5794a71baf6d104 100644 (file)
@@ -41,21 +41,113 @@ struct drm_display_mode;
 struct drm_mode_create_dumb;
 struct drm_printer;
 
-/* driver capabilities and requirements mask */
-#define DRIVER_USE_AGP                 0x1
-#define DRIVER_LEGACY                  0x2
-#define DRIVER_PCI_DMA                 0x8
-#define DRIVER_SG                      0x10
-#define DRIVER_HAVE_DMA                        0x20
-#define DRIVER_HAVE_IRQ                        0x40
-#define DRIVER_IRQ_SHARED              0x80
-#define DRIVER_GEM                     0x1000
-#define DRIVER_MODESET                 0x2000
-#define DRIVER_PRIME                   0x4000
-#define DRIVER_RENDER                  0x8000
-#define DRIVER_ATOMIC                  0x10000
-#define DRIVER_KMS_LEGACY_CONTEXT      0x20000
-#define DRIVER_SYNCOBJ                  0x40000
+/**
+ * enum drm_driver_feature - feature flags
+ *
+ * See &drm_driver.driver_features, &drm_device.driver_features and
+ * drm_core_check_feature().
+ */
+enum drm_driver_feature {
+       /**
+        * @DRIVER_GEM:
+        *
+        * Driver uses the GEM memory manager. This should be set for all modern
+        * drivers.
+        */
+       DRIVER_GEM                      = BIT(0),
+       /**
+        * @DRIVER_MODESET:
+        *
+        * Driver supports mode setting interfaces (KMS).
+        */
+       DRIVER_MODESET                  = BIT(1),
+       /**
+        * @DRIVER_PRIME:
+        *
+        * Driver implements DRM PRIME buffer sharing.
+        */
+       DRIVER_PRIME                    = BIT(2),
+       /**
+        * @DRIVER_RENDER:
+        *
+        * Driver supports dedicated render nodes. See also the :ref:`section on
+        * render nodes <drm_render_node>` for details.
+        */
+       DRIVER_RENDER                   = BIT(3),
+       /**
+        * @DRIVER_ATOMIC:
+        *
+        * Driver supports the full atomic modesetting userspace API. Drivers
+        * which only use atomic internally, but do not support the full
+        * userspace API (e.g. not all properties converted to atomic, or
+        * multi-plane updates are not guaranteed to be tear-free) should not
+        * set this flag.
+        */
+       DRIVER_ATOMIC                   = BIT(4),
+       /**
+        * @DRIVER_SYNCOBJ:
+        *
+        * Driver supports &drm_syncobj for explicit synchronization of command
+        * submission.
+        */
+       DRIVER_SYNCOBJ                  = BIT(5),
+
+       /* IMPORTANT: Below are all the legacy flags, add new ones above. */
+
+       /**
+        * @DRIVER_USE_AGP:
+        *
+        * Set up DRM AGP support, see drm_agp_init(); the DRM core will manage
+        * AGP resources. New drivers don't need this.
+        */
+       DRIVER_USE_AGP                  = BIT(25),
+       /**
+        * @DRIVER_LEGACY:
+        *
+        * Denote a legacy driver using shadow attach. Do not use.
+        */
+       DRIVER_LEGACY                   = BIT(26),
+       /**
+        * @DRIVER_PCI_DMA:
+        *
+        * Driver is capable of PCI DMA; mapping of PCI DMA buffers to userspace
+        * will be enabled. Only for legacy drivers. Do not use.
+        */
+       DRIVER_PCI_DMA                  = BIT(27),
+       /**
+        * @DRIVER_SG:
+        *
+        * Driver can perform scatter/gather DMA; allocation and mapping of
+        * scatter/gather buffers will be enabled. Only for legacy drivers. Do
+        * not use.
+        */
+       DRIVER_SG                       = BIT(28),
+
+       /**
+        * @DRIVER_HAVE_DMA:
+        *
+        * Driver supports DMA; the userspace DMA API will be supported. Only
+        * for legacy drivers. Do not use.
+        */
+       DRIVER_HAVE_DMA                 = BIT(29),
+       /**
+        * @DRIVER_HAVE_IRQ:
+        *
+        * Legacy irq support. Only for legacy drivers. Do not use.
+        *
+        * New drivers can either use the drm_irq_install() and
+        * drm_irq_uninstall() helper functions, or roll their own irq support
+        * code by calling request_irq() directly.
+        */
+       DRIVER_HAVE_IRQ                 = BIT(30),
+       /**
+        * @DRIVER_KMS_LEGACY_CONTEXT:
+        *
+        * Used only by nouveau for backwards compatibility with existing
+        * userspace.  Do not use.
+        */
+       DRIVER_KMS_LEGACY_CONTEXT       = BIT(31),
+};
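
As a minimal sketch (all other fields elided), a modern driver would typically
advertise its features like so:

        static struct drm_driver example_driver = {
                .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
                /* ... fops, name, date and the remaining hooks ... */
        };
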
 
 /**
  * struct drm_driver - DRM driver structure
@@ -579,7 +671,12 @@ struct drm_driver {
        /** @date: driver date */
        char *date;
 
-       /** @driver_features: driver features */
+       /**
+        * @driver_features:
+        * Driver features, see &enum drm_driver_feature. Drivers can disable
+        * some features on a per-instance basis using
+        * &drm_device.driver_features.
+        */
        u32 driver_features;
 
        /**
@@ -643,6 +740,10 @@ void drm_dev_unplug(struct drm_device *dev);
  * Unplugging itself is signalled through drm_dev_unplug(). If a device is
  * unplugged, these two functions guarantee that any store before calling
  * drm_dev_unplug() is visible to callers of this function after it completes.
+ *
+ * WARNING: This function fundamentally races against drm_dev_unplug(). It is
+ * recommended that drivers instead use the underlying drm_dev_enter() and
+ * drm_dev_exit() function pairs.
  */
 static inline bool drm_dev_is_unplugged(struct drm_device *dev)
 {
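
A sketch of the drm_dev_enter()/drm_dev_exit() pattern recommended in the
warning above; "idx" is the cookie the two helpers pass between each other:

        int idx;

        if (!drm_dev_enter(dev, &idx))
                return -ENODEV; /* device was unplugged */

        /* ... safely access the hardware ... */

        drm_dev_exit(idx);
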
@@ -662,7 +763,7 @@ static inline bool drm_dev_is_unplugged(struct drm_device *dev)
  * @feature: feature flag
  *
  * This checks @dev for driver features, see &drm_driver.driver_features,
- * &drm_device.driver_features, and the various DRIVER_\* flags.
+ * &drm_device.driver_features, and the various &enum drm_driver_feature flags.
  *
  * Returns true if the @feature is supported, false otherwise.
  */
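
A hypothetical use in an ioctl handler, gating an atomic-only code path:

        if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
                return -EOPNOTSUPP;
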
index e3c40483311569f26bac07cfbe007986860d22d0..8dc1a081fb36d112e950fbdb80444470b43bb391 100644 (file)
@@ -352,18 +352,17 @@ drm_load_edid_firmware(struct drm_connector *connector)
 
 int
 drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
-                                        const struct drm_display_mode *mode,
-                                        bool is_hdmi2_sink);
+                                        struct drm_connector *connector,
+                                        const struct drm_display_mode *mode);
 int
 drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
                                            struct drm_connector *connector,
                                            const struct drm_display_mode *mode);
 void
 drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
+                                  struct drm_connector *connector,
                                   const struct drm_display_mode *mode,
-                                  enum hdmi_quantization_range rgb_quant_range,
-                                  bool rgb_quant_range_selectable,
-                                  bool is_hdmi2_sink);
+                                  enum hdmi_quantization_range rgb_quant_range);
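
A sketch of how an encoder would use the reworked helpers; "connector" and
"mode" would come from the encoder's state in a real driver, and error
handling is condensed:

        struct hdmi_avi_infoframe frame;
        int ret;

        ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
        if (ret < 0)
                return ret;

        /* The sink capabilities are now derived from the connector, so the
         * extra bool parameters are gone. */
        drm_hdmi_avi_infoframe_quant_range(&frame, connector, mode,
                                           HDMI_QUANTIZATION_RANGE_FULL);
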
 
 /**
  * drm_eld_mnl - Get ELD monitor name length in bytes.
@@ -471,7 +470,6 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
 enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code);
 bool drm_detect_hdmi_monitor(struct edid *edid);
 bool drm_detect_monitor_audio(struct edid *edid);
-bool drm_rgb_quant_range_selectable(struct edid *edid);
 enum hdmi_quantization_range
 drm_default_rgb_quant_range(const struct drm_display_mode *mode);
 int drm_add_modes_noedid(struct drm_connector *connector,
index 1107b4b1c599f4ee962ed444670d73ee2d210046..a09864f6d68489f09101c4395a8a7eaee10d9ae0 100644 (file)
@@ -27,7 +27,6 @@
 #ifndef __DRM_ENCODER_SLAVE_H__
 #define __DRM_ENCODER_SLAVE_H__
 
-#include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_encoder.h>
 
index 8dbbe1eece1bd28eb18e4ee82788b8ad840b670f..4becb09975a457124489b9d6933b5e4829adc6b0 100644 (file)
@@ -2,31 +2,9 @@
 #ifndef __DRM_FB_CMA_HELPER_H__
 #define __DRM_FB_CMA_HELPER_H__
 
-struct drm_fbdev_cma;
-struct drm_gem_cma_object;
-
-struct drm_fb_helper_surface_size;
-struct drm_framebuffer_funcs;
-struct drm_fb_helper_funcs;
 struct drm_framebuffer;
-struct drm_fb_helper;
-struct drm_device;
-struct drm_file;
-struct drm_mode_fb_cmd2;
-struct drm_plane;
 struct drm_plane_state;
 
-int drm_fb_cma_fbdev_init(struct drm_device *dev, unsigned int preferred_bpp,
-                         unsigned int max_conn_count);
-void drm_fb_cma_fbdev_fini(struct drm_device *dev);
-
-struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
-       unsigned int preferred_bpp, unsigned int max_conn_count);
-void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma);
-
-void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma);
-void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma);
-
 struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
        unsigned int plane);
 
index bcb389f04618a25a924637e0ccbb4d4f7af340c7..b3d9d88ab2902e360b6adff982be9e5a438e7ba9 100644 (file)
@@ -143,6 +143,123 @@ struct drm_format_name_buf {
        char str[32];
 };
 
+/**
+ * drm_format_info_is_yuv_packed - check that the format info matches a YUV
+ * format with data laid out in a single plane
+ * @info: format info
+ *
+ * Returns:
+ * A boolean indicating whether the format info matches a packed YUV format.
+ */
+static inline bool
+drm_format_info_is_yuv_packed(const struct drm_format_info *info)
+{
+       return info->is_yuv && info->num_planes == 1;
+}
+
+/**
+ * drm_format_info_is_yuv_semiplanar - check that the format info matches a YUV
+ * format with data laid out in two planes (luminance and chrominance)
+ * @info: format info
+ *
+ * Returns:
+ * A boolean indicating whether the format info matches a semiplanar YUV format.
+ */
+static inline bool
+drm_format_info_is_yuv_semiplanar(const struct drm_format_info *info)
+{
+       return info->is_yuv && info->num_planes == 2;
+}
+
+/**
+ * drm_format_info_is_yuv_planar - check that the format info matches a YUV
+ * format with data laid out in three planes (one for each YUV component)
+ * @info: format info
+ *
+ * Returns:
+ * A boolean indicating whether the format info matches a planar YUV format.
+ */
+static inline bool
+drm_format_info_is_yuv_planar(const struct drm_format_info *info)
+{
+       return info->is_yuv && info->num_planes == 3;
+}
+
+/**
+ * drm_format_info_is_yuv_sampling_410 - check that the format info matches a
+ * YUV format with 4:1:0 sub-sampling
+ * @info: format info
+ *
+ * Returns:
+ * A boolean indicating whether the format info matches a YUV format with 4:1:0
+ * sub-sampling.
+ */
+static inline bool
+drm_format_info_is_yuv_sampling_410(const struct drm_format_info *info)
+{
+       return info->is_yuv && info->hsub == 4 && info->vsub == 4;
+}
+
+/**
+ * drm_format_info_is_yuv_sampling_411 - check that the format info matches a
+ * YUV format with 4:1:1 sub-sampling
+ * @info: format info
+ *
+ * Returns:
+ * A boolean indicating whether the format info matches a YUV format with 4:1:1
+ * sub-sampling.
+ */
+static inline bool
+drm_format_info_is_yuv_sampling_411(const struct drm_format_info *info)
+{
+       return info->is_yuv && info->hsub == 4 && info->vsub == 1;
+}
+
+/**
+ * drm_format_info_is_yuv_sampling_420 - check that the format info matches a
+ * YUV format with 4:2:0 sub-sampling
+ * @info: format info
+ *
+ * Returns:
+ * A boolean indicating whether the format info matches a YUV format with 4:2:0
+ * sub-sampling.
+ */
+static inline bool
+drm_format_info_is_yuv_sampling_420(const struct drm_format_info *info)
+{
+       return info->is_yuv && info->hsub == 2 && info->vsub == 2;
+}
+
+/**
+ * drm_format_info_is_yuv_sampling_422 - check that the format info matches a
+ * YUV format with 4:2:2 sub-sampling
+ * @info: format info
+ *
+ * Returns:
+ * A boolean indicating whether the format info matches a YUV format with 4:2:2
+ * sub-sampling.
+ */
+static inline bool
+drm_format_info_is_yuv_sampling_422(const struct drm_format_info *info)
+{
+       return info->is_yuv && info->hsub == 2 && info->vsub == 1;
+}
+
+/**
+ * drm_format_info_is_yuv_sampling_444 - check that the format info matches a
+ * YUV format with 4:4:4 sub-sampling
+ * @info: format info
+ *
+ * Returns:
+ * A boolean indicating whether the format info matches a YUV format with 4:4:4
+ * sub-sampling.
+ */
+static inline bool
+drm_format_info_is_yuv_sampling_444(const struct drm_format_info *info)
+{
+       return info->is_yuv && info->hsub == 1 && info->vsub == 1;
+}
+
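+
To illustrate the new classification helpers (a hypothetical snippet; NV12 is
a two-plane 4:2:0 format, so both checks hold):

        const struct drm_format_info *info = drm_format_info(DRM_FORMAT_NV12);

        if (info && drm_format_info_is_yuv_semiplanar(info) &&
            drm_format_info_is_yuv_sampling_420(info)) {
                /* Two planes (luma + interleaved chroma), 4:2:0 sub-sampled. */
        }
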
 const struct drm_format_info *__drm_format_info(u32 format);
 const struct drm_format_info *drm_format_info(u32 format);
 const struct drm_format_info *
index c94acedfb08ebf4efcd3adba8365d64860905012..f0b34c977ec5765c54c4c653ce93e95325f68950 100644 (file)
 #ifndef __DRM_FRAMEBUFFER_H__
 #define __DRM_FRAMEBUFFER_H__
 
-#include <linux/list.h>
 #include <linux/ctype.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+
 #include <drm/drm_mode_object.h>
 
-struct drm_framebuffer;
-struct drm_file;
+struct drm_clip_rect;
 struct drm_device;
+struct drm_file;
+struct drm_framebuffer;
+struct drm_gem_object;
 
 /**
  * struct drm_framebuffer_funcs - framebuffer hooks
index 07c504940ba16c169f32c58a6d25b82ae900c758..947ac95eb24a98dffb436ebffba9f2de2dfab7d4 100644 (file)
@@ -2,9 +2,12 @@
 #ifndef __DRM_GEM_CMA_HELPER_H__
 #define __DRM_GEM_CMA_HELPER_H__
 
-#include <drm/drmP.h>
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
 #include <drm/drm_gem.h>
 
+struct drm_mode_create_dumb;
+
 /**
  * struct drm_gem_cma_object - GEM object backed by CMA memory allocations
  * @base: base GEM object
index a38de7eb55b49fde782b78214dfaa804052bca08..7f307e834eef3a66739cf3b3920dd14ffccfb66b 100644 (file)
@@ -25,6 +25,9 @@ drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
 struct drm_framebuffer *
 drm_gem_fb_create(struct drm_device *dev, struct drm_file *file,
                  const struct drm_mode_fb_cmd2 *mode_cmd);
+struct drm_framebuffer *
+drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file,
+                            const struct drm_mode_fb_cmd2 *mode_cmd);
 
 int drm_gem_fb_prepare_fb(struct drm_plane *plane,
                          struct drm_plane_state *state);
index baded65144563b219657f0082b974a8fa6faf64f..be4fed97e7273b481ba05e28d53d66b755c673f4 100644 (file)
@@ -136,8 +136,7 @@ enum drm_mode_status {
        .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \
        .htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \
        .vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \
-       .vscan = (vs), .flags = (f), \
-       .base.type = DRM_MODE_OBJECT_MODE
+       .vscan = (vs), .flags = (f)
 
 #define CRTC_INTERLACE_HALVE_V (1 << 0) /* halve V values for interlacing */
 #define CRTC_STEREO_DOUBLE     (1 << 1) /* adjust timings for stereo modes */
@@ -213,20 +212,6 @@ struct drm_display_mode {
         */
        struct list_head head;
 
-       /**
-        * @base:
-        *
-        * A display mode is a normal modeset object, possibly including public
-        * userspace id.
-        *
-        * FIXME:
-        *
-        * This can probably be removed since the entire concept of userspace
-        * managing modes explicitly has never landed in upstream kernel mode
-        * setting support.
-        */
-       struct drm_mode_object base;
-
        /**
         * @name:
         *
@@ -429,14 +414,14 @@ struct drm_display_mode {
 /**
  * DRM_MODE_FMT - printf string for &struct drm_display_mode
  */
-#define DRM_MODE_FMT    "%d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x"
+#define DRM_MODE_FMT    "\"%s\": %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x"
 
 /**
  * DRM_MODE_ARG - printf arguments for &struct drm_display_mode
  * @m: display mode
  */
 #define DRM_MODE_ARG(m) \
-       (m)->base.id, (m)->name, (m)->vrefresh, (m)->clock, \
+       (m)->name, (m)->vrefresh, (m)->clock, \
        (m)->hdisplay, (m)->hsync_start, (m)->hsync_end, (m)->htotal, \
        (m)->vdisplay, (m)->vsync_start, (m)->vsync_end, (m)->vtotal, \
        (m)->type, (m)->flags
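
For example, a driver can now log a mode with the id-less format directly:

        DRM_DEBUG_KMS("set mode " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
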
diff --git a/include/drm/drm_probe_helper.h b/include/drm/drm_probe_helper.h
new file mode 100644 (file)
index 0000000..8d3ed28
--- /dev/null
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+#ifndef __DRM_PROBE_HELPER_H__
+#define __DRM_PROBE_HELPER_H__
+
+#include <linux/types.h>
+
+struct drm_connector;
+struct drm_device;
+struct drm_modeset_acquire_ctx;
+
+int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
+                                           uint32_t maxX, uint32_t maxY);
+int drm_helper_probe_detect(struct drm_connector *connector,
+                           struct drm_modeset_acquire_ctx *ctx,
+                           bool force);
+void drm_kms_helper_poll_init(struct drm_device *dev);
+void drm_kms_helper_poll_fini(struct drm_device *dev);
+bool drm_helper_hpd_irq_event(struct drm_device *dev);
+void drm_kms_helper_hotplug_event(struct drm_device *dev);
+
+void drm_kms_helper_poll_disable(struct drm_device *dev);
+void drm_kms_helper_poll_enable(struct drm_device *dev);
+bool drm_kms_helper_is_poll_worker(void);
+
+#endif
index 88abdca89baadb91e0a17b3ee40cf9bd52f93e5c..07b8e9f04599801434041b7a23e5f914bbaa3a10 100644 (file)
 #ifndef _DRM_UTIL_H_
 #define _DRM_UTIL_H_
 
-/* helper for handling conditionals in various for_each macros */
+/**
+ * DOC: drm utils
+ *
+ * Macros and inline functions that do not naturally belong in other places.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kgdb.h>
+#include <linux/preempt.h>
+#include <linux/smp.h>
+
+/*
+ * Use EXPORT_SYMBOL_FOR_TESTS_ONLY() for functions that shall
+ * only be visible for drmselftests.
+ */
+#if defined(CONFIG_DRM_DEBUG_SELFTEST_MODULE)
+#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x) EXPORT_SYMBOL(x)
+#else
+#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x)
+#endif
+
+/**
+ * for_each_if - helper for handling conditionals in various for_each macros
+ * @condition: The condition to check
+ *
+ * Typical use::
+ *
+ *     #define for_each_foo_bar(x, y) \
+ *             list_for_each_entry(x, y->list, head) \
+ *                     for_each_if(x->something == SOMETHING)
+ *
+ * The for_each_if() macro makes the use of for_each_foo_bar() less error
+ * prone.
+ */
 #define for_each_if(condition) if (!(condition)) {} else
 
+/**
+ * drm_can_sleep - returns true if currently okay to sleep
+ *
+ * This function shall not be used in new code.
+ * The check for running in atomic context may not work - see linux/preempt.h.
+ *
+ * FIXME: All users of drm_can_sleep should be removed (see todo.rst)
+ *
+ * Returns:
+ * False if kgdb is active, we are in atomic context, or irqs are disabled;
+ * true otherwise.
+ */
+static inline bool drm_can_sleep(void)
+{
+       if (in_atomic() || in_dbg_master() || irqs_disabled())
+               return false;
+       return true;
+}
+
 #endif
index 6ad9630d4f48e04c991a68753e580c7f173e51d9..e528bb2f659d95296062c8206dc1b3f2bad29241 100644 (file)
@@ -128,6 +128,26 @@ struct drm_vblank_crtc {
         * @last: Protected by &drm_device.vbl_lock, used for wraparound handling.
         */
        u32 last;
+       /**
+        * @max_vblank_count:
+        *
+        * Maximum value of the vblank registers for this crtc. This value +1
+        * will result in a wrap-around of the vblank register. It is used
+        * by the vblank core to handle wrap-arounds.
+        *
+        * If set to zero the vblank core will try to guess the elapsed vblanks
+        * between times when the vblank interrupt is disabled through
+        * high-precision timestamps. That approach suffers from small
+        * races and imprecision over longer time periods, hence exposing a
+        * hardware vblank counter is always recommended.
+        *
+        * This is the runtime-configurable per-crtc maximum set through
+        * drm_crtc_set_max_vblank_count(). If this is used, the driver
+        * must leave the device-wide &drm_device.max_vblank_count at zero.
+        *
+        * If non-zero, &drm_crtc_funcs.get_vblank_counter must be set.
+        */
+       u32 max_vblank_count;
        /**
         * @inmodeset: Tracks whether the vblank is disabled due to a modeset.
         * For legacy driver bit 2 additionally tracks whether an additional
@@ -206,4 +226,6 @@ bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
 void drm_calc_timestamping_constants(struct drm_crtc *crtc,
                                     const struct drm_display_mode *mode);
 wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc);
+void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc,
+                                  u32 max_vblank_count);
 #endif
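
As a hypothetical example, a driver whose hardware exposes a 24-bit frame
counter on a given CRTC would register it as follows (the counter width is
illustrative); this requires &drm_crtc_funcs.get_vblank_counter to be set and
the device-wide &drm_device.max_vblank_count to stay zero:

        drm_crtc_set_max_vblank_count(crtc, 0xffffff);
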
index 192667144693a0ab3adb7ae12d04a420b7567b37..d2fad7b0fcf65a7334cf074c40ab2ce738301dbb 100644 (file)
        INTEL_VGA_DEVICE(0x3E9A, info)  /* SRV GT2 */
 
 /* CFL H */
+#define INTEL_CFL_H_GT1_IDS(info) \
+       INTEL_VGA_DEVICE(0x3E9C, info)
+
 #define INTEL_CFL_H_GT2_IDS(info) \
        INTEL_VGA_DEVICE(0x3E9B, info), /* Halo GT2 */ \
        INTEL_VGA_DEVICE(0x3E94, info)  /* Halo GT2 */
 #define INTEL_CFL_IDS(info)       \
        INTEL_CFL_S_GT1_IDS(info), \
        INTEL_CFL_S_GT2_IDS(info), \
+       INTEL_CFL_H_GT1_IDS(info), \
        INTEL_CFL_H_GT2_IDS(info), \
        INTEL_CFL_U_GT2_IDS(info), \
        INTEL_CFL_U_GT3_IDS(info), \
        INTEL_VGA_DEVICE(0x8A51, info), \
        INTEL_VGA_DEVICE(0x8A5C, info), \
        INTEL_VGA_DEVICE(0x8A5D, info), \
+       INTEL_VGA_DEVICE(0x8A59, info), \
+       INTEL_VGA_DEVICE(0x8A58, info), \
        INTEL_VGA_DEVICE(0x8A52, info), \
        INTEL_VGA_DEVICE(0x8A5A, info), \
        INTEL_VGA_DEVICE(0x8A5B, info), \
+       INTEL_VGA_DEVICE(0x8A57, info), \
+       INTEL_VGA_DEVICE(0x8A56, info), \
        INTEL_VGA_DEVICE(0x8A71, info), \
        INTEL_VGA_DEVICE(0x8A70, info)
 
index b8ba5886198678b9fd490a8794e8c794fe47bd47..f4ec2834bc229ba55939ea8a3a64aeb866536d2d 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <drm/tinydrm/tinydrm.h>
 
+struct drm_rect;
 struct spi_device;
 struct gpio_desc;
 struct regulator;
@@ -67,6 +68,8 @@ int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi,
                  const struct drm_simple_display_pipe_funcs *pipe_funcs,
                  struct drm_driver *driver,
                  const struct drm_display_mode *mode, unsigned int rotation);
+void mipi_dbi_pipe_update(struct drm_simple_display_pipe *pipe,
+                         struct drm_plane_state *old_state);
 void mipi_dbi_enable_flush(struct mipi_dbi *mipi,
                           struct drm_crtc_state *crtc_state,
                           struct drm_plane_state *plane_state);
@@ -80,7 +83,7 @@ u32 mipi_dbi_spi_cmd_max_speed(struct spi_device *spi, size_t len);
 int mipi_dbi_command_read(struct mipi_dbi *mipi, u8 cmd, u8 *val);
 int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len);
 int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
-                     struct drm_clip_rect *clip, bool swap);
+                     struct drm_rect *clip, bool swap);
 /**
  * mipi_dbi_command - MIPI DCS command with optional parameter(s)
  * @mipi: MIPI structure
index 5b96f0b12c8cefb76554abf8faf281a5689634e5..f0d598789e4d34adbd461215c6782df6337735ae 100644 (file)
@@ -11,8 +11,8 @@
 #define __LINUX_TINYDRM_HELPERS_H
 
 struct backlight_device;
-struct tinydrm_device;
-struct drm_clip_rect;
+struct drm_framebuffer;
+struct drm_rect;
 struct spi_transfer;
 struct spi_message;
 struct spi_device;
@@ -33,23 +33,15 @@ static inline bool tinydrm_machine_little_endian(void)
 #endif
 }
 
-bool tinydrm_merge_clips(struct drm_clip_rect *dst,
-                        struct drm_clip_rect *src, unsigned int num_clips,
-                        unsigned int flags, u32 max_width, u32 max_height);
-int tinydrm_fb_dirty(struct drm_framebuffer *fb,
-                    struct drm_file *file_priv,
-                    unsigned int flags, unsigned int color,
-                    struct drm_clip_rect *clips,
-                    unsigned int num_clips);
 void tinydrm_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb,
-                   struct drm_clip_rect *clip);
+                   struct drm_rect *clip);
 void tinydrm_swab16(u16 *dst, void *vaddr, struct drm_framebuffer *fb,
-                   struct drm_clip_rect *clip);
+                   struct drm_rect *clip);
 void tinydrm_xrgb8888_to_rgb565(u16 *dst, void *vaddr,
                                struct drm_framebuffer *fb,
-                               struct drm_clip_rect *clip, bool swap);
+                               struct drm_rect *clip, bool swap);
 void tinydrm_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb,
-                              struct drm_clip_rect *clip);
+                              struct drm_rect *clip);
 
 size_t tinydrm_spi_max_transfer_size(struct spi_device *spi, size_t max_len);
 bool tinydrm_spi_bpw_supported(struct spi_device *spi, u8 bpw);
index 448aa5ea4722da2007fcfe8ee79855ccb22bb6ef..5621688edcc0aec7f9f23fd4ab60886a45502c8b 100644 (file)
 #ifndef __LINUX_TINYDRM_H
 #define __LINUX_TINYDRM_H
 
-#include <linux/mutex.h>
 #include <drm/drm_simple_kms_helper.h>
 
-struct drm_clip_rect;
 struct drm_driver;
-struct drm_file;
-struct drm_framebuffer;
-struct drm_framebuffer_funcs;
 
 /**
  * struct tinydrm_device - tinydrm device
@@ -32,24 +27,6 @@ struct tinydrm_device {
         * @pipe: Display pipe structure
         */
        struct drm_simple_display_pipe pipe;
-
-       /**
-        * @dirty_lock: Serializes framebuffer flushing
-        */
-       struct mutex dirty_lock;
-
-       /**
-        * @fb_funcs: Framebuffer functions used when creating framebuffers
-        */
-       const struct drm_framebuffer_funcs *fb_funcs;
-
-       /**
-        * @fb_dirty: Framebuffer dirty callback
-        */
-       int (*fb_dirty)(struct drm_framebuffer *framebuffer,
-                       struct drm_file *file_priv, unsigned flags,
-                       unsigned color, struct drm_clip_rect *clips,
-                       unsigned num_clips);
 };
 
 static inline struct tinydrm_device *
@@ -82,13 +59,10 @@ pipe_to_tinydrm(struct drm_simple_display_pipe *pipe)
        .clock = 1 /* pass validation */
 
 int devm_tinydrm_init(struct device *parent, struct tinydrm_device *tdev,
-                     const struct drm_framebuffer_funcs *fb_funcs,
                      struct drm_driver *driver);
 int devm_tinydrm_register(struct tinydrm_device *tdev);
 void tinydrm_shutdown(struct tinydrm_device *tdev);
 
-void tinydrm_display_pipe_update(struct drm_simple_display_pipe *pipe,
-                                struct drm_plane_state *old_state);
 int
 tinydrm_display_pipe_init(struct tinydrm_device *tdev,
                          const struct drm_simple_display_pipe_funcs *funcs,
index bc8940ca280dc74c36c706dc6d7c831a39381cc4..c0ff417b477063d50eb7e168679843001a122efe 100644 (file)
@@ -40,6 +40,7 @@ struct dma_fence_array_cb {
  * @num_fences: number of fences in the array
  * @num_pending: fences in the array still pending
  * @fences: array of the fences
+ * @work: internal irq_work function
  */
 struct dma_fence_array {
        struct dma_fence base;
index d2bacf5024295dcfb6092de230179e6f78117c3f..927ad6451105a43397e3942860012cbcd0ad9d28 100644 (file)
 #include <linux/types.h>
 #include <linux/device.h>
 
+enum hdmi_packet_type {
+       HDMI_PACKET_TYPE_NULL = 0x00,
+       HDMI_PACKET_TYPE_AUDIO_CLOCK_REGEN = 0x01,
+       HDMI_PACKET_TYPE_AUDIO_SAMPLE = 0x02,
+       HDMI_PACKET_TYPE_GENERAL_CONTROL = 0x03,
+       HDMI_PACKET_TYPE_ACP = 0x04,
+       HDMI_PACKET_TYPE_ISRC1 = 0x05,
+       HDMI_PACKET_TYPE_ISRC2 = 0x06,
+       HDMI_PACKET_TYPE_ONE_BIT_AUDIO_SAMPLE = 0x07,
+       HDMI_PACKET_TYPE_DST_AUDIO = 0x08,
+       HDMI_PACKET_TYPE_HBR_AUDIO_STREAM = 0x09,
+       HDMI_PACKET_TYPE_GAMUT_METADATA = 0x0a,
+       /* + enum hdmi_infoframe_type */
+};
+
 enum hdmi_infoframe_type {
        HDMI_INFOFRAME_TYPE_VENDOR = 0x81,
        HDMI_INFOFRAME_TYPE_AVI = 0x82,
index ed1dfba5e5f9e64eabb6c70fa837ece75cbf463f..bfecd6bd499057e29962aefa4ec74a8ff2f3273c 100644 (file)
@@ -26,4 +26,7 @@ struct intel_soc_pmic {
        struct device *dev;
 };
 
+int intel_soc_pmic_exec_mipi_pmic_seq_element(u16 i2c_address, u32 reg_address,
+                                             u32 value, u32 mask);
+
 #endif /* __INTEL_SOC_PMIC_H__ */
index a37ef73092e5730db801685d0b0e7433adeeec8c..3d340b6f1ea3817adfd293aad0ed0a96a6b2481b 100644 (file)
@@ -80,6 +80,32 @@ TRACE_EVENT(host1x_cdma_push,
                __entry->name, __entry->op1, __entry->op2)
 );
 
+TRACE_EVENT(host1x_cdma_push_wide,
+       TP_PROTO(const char *name, u32 op1, u32 op2, u32 op3, u32 op4),
+
+       TP_ARGS(name, op1, op2, op3, op4),
+
+       TP_STRUCT__entry(
+               __field(const char *, name)
+               __field(u32, op1)
+               __field(u32, op2)
+               __field(u32, op3)
+               __field(u32, op4)
+       ),
+
+       TP_fast_assign(
+               __entry->name = name;
+               __entry->op1 = op1;
+               __entry->op2 = op2;
+               __entry->op3 = op3;
+               __entry->op4 = op4;
+       ),
+
+       TP_printk("name=%s, op1=%08x, op2=%08x, op3=%08x, op4=%08x",
+               __entry->name, __entry->op1, __entry->op2, __entry->op3,
+               __entry->op4)
+);
+
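
The driver side would emit the new event through the generated trace_ helper,
e.g. for a four-word push (a sketch; "name" is whatever identifies the channel):

        trace_host1x_cdma_push_wide(name, op1, op2, op3, op4);
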
 TRACE_EVENT(host1x_cdma_push_gather,
        TP_PROTO(const char *name, struct host1x_bo *bo,
                        u32 words, u32 offset, void *cmdbuf),
index 0b44260a5ee969a4abbb1c94358bd8ae08e28193..93a341d278a67406168864bf673c0cb7a5fdc5bf 100644 (file)
@@ -238,6 +238,8 @@ extern "C" {
 #define DRM_FORMAT_MOD_VENDOR_VIVANTE 0x06
 #define DRM_FORMAT_MOD_VENDOR_BROADCOM 0x07
 #define DRM_FORMAT_MOD_VENDOR_ARM     0x08
+#define DRM_FORMAT_MOD_VENDOR_ALLWINNER 0x09
+
 /* add more to the end as needed */
 
 #define DRM_FORMAT_RESERVED          ((1ULL << 56) - 1)
@@ -572,6 +574,9 @@ extern "C" {
  * AFBC has several features which may be supported and/or used, which are
  * represented using bits in the modifier. Not all combinations are valid,
  * and different devices or use-cases may support different combinations.
+ *
+ * Further information on the use of AFBC modifiers can be found in
+ * Documentation/gpu/afbc.rst
  */
 #define DRM_FORMAT_MOD_ARM_AFBC(__afbc_mode)   fourcc_mod_code(ARM, __afbc_mode)
 
@@ -581,10 +586,18 @@ extern "C" {
  * Indicates the superblock size(s) used for the AFBC buffer. The buffer
  * size (in pixels) must be aligned to a multiple of the superblock size.
+ * The four least significant bits (LSBs) are reserved for the block size.
+ *
+ * Where one superblock size is specified, it applies to all planes of the
+ * buffer (e.g. 16x16, 32x8). When multiple superblock sizes are specified,
+ * the first applies to the Luma plane and the second applies to the Chroma
+ * plane(s) (e.g. 32x8_64x4 means 32x8 Luma, with 64x4 Chroma).
+ * Multiple superblock sizes are only valid for multi-plane YCbCr formats.
  */
 #define AFBC_FORMAT_MOD_BLOCK_SIZE_MASK      0xf
 #define AFBC_FORMAT_MOD_BLOCK_SIZE_16x16     (1ULL)
 #define AFBC_FORMAT_MOD_BLOCK_SIZE_32x8      (2ULL)
+#define AFBC_FORMAT_MOD_BLOCK_SIZE_64x4      (3ULL)
+#define AFBC_FORMAT_MOD_BLOCK_SIZE_32x8_64x4 (4ULL)
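
A hypothetical modifier for a multi-plane YCbCr buffer using the new mixed
superblock size could be composed as:

        __u64 modifier =
                DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8_64x4);
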
 
 /*
  * AFBC lossless colorspace transform
@@ -644,6 +657,35 @@ extern "C" {
  */
 #define AFBC_FORMAT_MOD_SC      (1ULL <<  9)
 
+/*
+ * AFBC double-buffer
+ *
+ * Indicates that the buffer is allocated in a layout safe for front-buffer
+ * rendering.
+ */
+#define AFBC_FORMAT_MOD_DB      (1ULL << 10)
+
+/*
+ * AFBC buffer content hints
+ *
+ * Indicates that the buffer includes per-superblock content hints.
+ */
+#define AFBC_FORMAT_MOD_BCH     (1ULL << 11)
+
+/*
+ * Allwinner tiled modifier
+ *
+ * This tiling mode is implemented by the VPU found on all Allwinner platforms,
+ * codenamed sunxi. It is associated with a YUV format that uses either 2 or 3
+ * planes.
+ *
+ * With this tiling, the luminance samples are arranged in tiles representing
+ * 32x32 pixels and the chrominance samples in tiles representing 32x64 pixels.
+ * The pixel order in each tile is linear and the tiles are laid out linearly,
+ * both in row-major order.
+ */
+#define DRM_FORMAT_MOD_ALLWINNER_TILED fourcc_mod_code(ALLWINNER, 1)
+
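
Reading the layout description literally, the byte offset of an 8-bit luma
sample could be computed as below; this is a sketch derived from the comment
above rather than from driver code, and assumes the stride is a multiple of 32:

        static size_t sunxi_tiled_luma_offset(unsigned int x, unsigned int y,
                                              unsigned int stride)
        {
                unsigned int tiles_per_row = stride / 32; /* 32x32 luma tiles */
                unsigned int tile = (y / 32) * tiles_per_row + x / 32;

                /* Tiles are laid out linearly; pixels are linear in a tile. */
                return (size_t)tile * 32 * 32 + (y % 32) * 32 + x % 32;
        }
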
 #if defined(__cplusplus)
 }
 #endif
index 298b2e197744bbc28782d1a853e1ee3577f02bee..397810fa2d33c95f69770bdf3563ea44213b40c6 100644 (file)
@@ -1486,9 +1486,73 @@ struct drm_i915_gem_context_param {
 #define   I915_CONTEXT_MAX_USER_PRIORITY       1023 /* inclusive */
 #define   I915_CONTEXT_DEFAULT_PRIORITY                0
 #define   I915_CONTEXT_MIN_USER_PRIORITY       -1023 /* inclusive */
+       /*
+        * When using the following param, value should be a pointer to
+        * drm_i915_gem_context_param_sseu.
+        */
+#define I915_CONTEXT_PARAM_SSEU                0x7
        __u64 value;
 };
 
+/**
+ * Context SSEU programming
+ *
+ * It may be necessary for either functional or performance reasons to configure
+ * a context to run with a reduced number of SSEU (where SSEU stands for Slice/
+ * Sub-slice/EU).
+ *
+ * This is done by setting the SSEU configuration through the below
+ * &struct drm_i915_gem_context_param_sseu for every supported engine which
+ * userspace intends to use.
+ *
+ * Not all GPUs or engines support this functionality, in which case an error
+ * code of -ENODEV will be returned.
+ *
+ * Also, the flexibility of possible SSEU configuration permutations varies
+ * between GPU generations and is further constrained by software-imposed
+ * limitations. Requesting an unsupported combination will return an error
+ * code of -EINVAL.
+ *
+ * NOTE: When perf/OA is active the context's SSEU configuration is ignored in
+ * favour of a single global setting.
+ */
+struct drm_i915_gem_context_param_sseu {
+       /*
+        * Engine class & instance to be configured or queried.
+        */
+       __u16 engine_class;
+       __u16 engine_instance;
+
+       /*
+        * Unused for now. Must be cleared to zero.
+        */
+       __u32 flags;
+
+       /*
+        * Mask of slices to enable for the context. Valid values are a subset
+        * of the bitmask value returned for I915_PARAM_SLICE_MASK.
+        */
+       __u64 slice_mask;
+
+       /*
+        * Mask of subslices to enable for the context. Valid values are a
+        * subset of the bitmask value returned by I915_PARAM_SUBSLICE_MASK.
+        */
+       __u64 subslice_mask;
+
+       /*
+        * Minimum/Maximum number of EUs to enable per subslice for the
+        * context. min_eus_per_subslice must be less than or equal to
+        * max_eus_per_subslice.
+        */
+       __u16 min_eus_per_subslice;
+       __u16 max_eus_per_subslice;
+
+       /*
+        * Unused for now. Must be cleared to zero.
+        */
+       __u32 rsvd;
+};
+
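
From userspace, applying a reduced configuration might look like the sketch
below; "fd" and "ctx_id" are assumed to exist, the mask and EU values are
illustrative, and real code should first query I915_PARAM_SLICE_MASK and
I915_PARAM_SUBSLICE_MASK:

        struct drm_i915_gem_context_param_sseu sseu = {
                .engine_class = I915_ENGINE_CLASS_RENDER,
                .engine_instance = 0,
                .slice_mask = 0x1,
                .subslice_mask = 0x1,
                .min_eus_per_subslice = 8,
                .max_eus_per_subslice = 8,
        };
        struct drm_i915_gem_context_param arg = {
                .ctx_id = ctx_id,
                .param = I915_CONTEXT_PARAM_SSEU,
                .value = (__u64)(uintptr_t)&sseu,
        };

        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg))
                return -errno; /* -ENODEV or -EINVAL per the rules above */
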
 enum drm_i915_oa_format {
        I915_OA_FORMAT_A13 = 1,     /* HSW only */
        I915_OA_FORMAT_A29,         /* HSW only */